    def wrapped(*args, **kwargs):
        if is_options:
            options = _clone_options(args[0])
            args = args[1:]
        else:
            options = dict(_default_options)
        if is_retries:
            retries = args[0]
            args = args[1:]
        else:
            retries = options['retries']
        options['retries'] = 0
        if options.get('propagation') is None and _temp_transaction_options.propagation:
            options['propagation'] = db.ALLOWED
        options = db.create_transaction_options(**options)

        if db.is_in_transaction():
            return _orig_run_in_transaction_options(options, *args, **kwargs)

        if not retries:
            retries = 3
        if APPSCALE:
            retries += 3

        def run(transaction_guid):
            max_tries = retries + 1
            count = 0
            while count < max_tries:
                count += 1
                start = time.time()
                try:
                    return _orig_run_in_transaction_options(options, *args, **kwargs)
                except (TransactionFailedError, db.Timeout) as e:
                    if isinstance(e, db.Timeout) and type(e) != db.Timeout:
                        raise  # only plain db.Timeout is retried, not its subclasses
                    if count == max_tries:
                        raise  # re-raise, preserving the original traceback
                    transactions.post_transaction_actions.reset(transaction_guid)
                    logging.info("%s: %s. Retrying... (%s)", e.__class__.__name__, e.message, count)
                    sleep_time = 1.1 - (time.time() - start)
                    if sleep_time > 0:
                        logging.info("Sleeping %s seconds ....", sleep_time)
                        time.sleep(sleep_time)

        from rogerthat.utils import transactions
        if db.is_in_transaction():
            transaction_guid = transactions.post_transaction_actions.get_current_transaction_guid()
        else:
            transaction_guid = str(uuid.uuid4())
            transactions.post_transaction_actions.set_current_transaction_guid(transaction_guid)
        try:
            r = run(transaction_guid)
        except:
            transactions.post_transaction_actions.finalize(success=False, transaction_guid=transaction_guid)
            raise
        try:
            transactions.post_transaction_actions.finalize(success=True, transaction_guid=transaction_guid)
        except:
            logging.error("Caught exception in rpc.transaction_done", exc_info=1, _suppress=False)
        return r
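# Usage sketch (illustrative, not from the source): assuming the retrying
# wrapper above is installed as the app's run-in-transaction entry point,
# transient TransactionFailedError / db.Timeout failures are retried with
# ~1.1s pacing and post-transaction actions are reset between attempts.
# `_increment` and the counter entity are hypothetical.
from google.appengine.ext import db

def _increment(counter_key):
    counter = db.get(counter_key)
    counter.count += 1
    counter.put()

# db.run_in_transaction(_increment, counter_key)  # retried up to `retries` times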
            def _send_client_call(mobile_detail, cc, user, method):
                from rogerthat.rpc.calls import capi_priority_mapping
                now_ = now()
                call_id = unicode(uuid.uuid1())
                cc[CALL_ID] = call_id
                cc[CALL_TIMESTAMP] = now_
                message = json.dumps(cc)
                rpc_capi_call = RpcCAPICall(parent=get_rpc_capi_backlog_parent_by_account(user, mobile_detail.account),
                                            key_name=call_id, timestamp=now_, call=message,
                                            priority=capi_priority_mapping[cc[FUNCTION]],
                                            resultFunction=result_f.meta[u"mapping"],
                                            errorFunction=error_f.meta[u"mapping"],
                                            deferredKick=DEFER_KICK in kwargs, method=method)
                # TODO: make this the default and make 'MOBILE_ACCOUNT' parameter mandatory
                if DO_NOT_SAVE_RPCCALL_OBJECTS not in kwargs:
                    rpc_capi_call.put()
                if mobile_detail.type_ in (Mobile.TYPE_IPHONE_HTTP_APNS_KICK, Mobile.TYPE_IPHONE_HTTP_XMPP_KICK,
                                           Mobile.TYPE_ANDROID_FIREBASE_HTTP, Mobile.TYPE_ANDROID_HTTP,
                                           Mobile.TYPE_WINDOWS_PHONE,
                                           Mobile.TYPE_LEGACY_IPHONE_XMPP, Mobile.TYPE_LEGACY_IPHONE):
                    prio = kwargs.get(CAPI_KEYWORD_ARG_PRIORITY, priority)
                    if mobile_detail.type_ in Mobile.ANDROID_TYPES and mobile_detail.pushId:
                        if mobile_detail.type_ == Mobile.TYPE_ANDROID_FIREBASE_HTTP:
                            # Kick via Firebase
                            if db.is_in_transaction():
                                on_trans_committed(firebase.kick, mobile_detail.pushId, prio)
                            else:
                                firebase.kick(mobile_detail.pushId, prio)
                        else:
                            # Kick via GCM
                            if db.is_in_transaction():
                                on_trans_committed(gcm.kick, mobile_detail.pushId, prio)
                            else:
                                gcm.kick(mobile_detail.pushId, prio)
                    else:
                        # Kick via Jabbercloud
                        type_ = set()
                        if mobile_detail.type_ in {Mobile.TYPE_IPHONE_HTTP_XMPP_KICK, Mobile.TYPE_WINDOWS_PHONE, Mobile.TYPE_LEGACY_IPHONE_XMPP, Mobile.TYPE_LEGACY_IPHONE}.union(Mobile.ANDROID_TYPES):
                            type_.add("xmpp")
                        if mobile_detail.type_ in (Mobile.TYPE_IPHONE_HTTP_APNS_KICK, Mobile.TYPE_LEGACY_IPHONE_XMPP, Mobile.TYPE_LEGACY_IPHONE):
                            type_.add("apns")

                        cbd = dict(r=mobile_detail.account, p=prio, t=list(type_),
                                   kid=str(uuid.uuid4()), a=mobile_detail.app_id)
                        if mobile_detail.pushId:
                            cbd['d'] = mobile_detail.pushId
                            if "apns" in type_:
                                cbd['m'] = kwargs.get(CAPI_KEYWORD_ARG_APPLE_PUSH_MESSAGE, DEFAULT_APPLE_PUSH_MESSAGE)
                        if DEFER_KICK in kwargs:
                            deferred.defer(_deferred_kick, call_id, json.dumps(cbd),
                                           _countdown=2, _transactional=db.is_in_transaction(), _queue=FAST_QUEUE)
                        elif db.is_in_transaction():
                            on_trans_committed(kicks.append, json.dumps(cbd))
                        else:
                            kicks.append(json.dumps(cbd))
                return rpc_capi_call
def schedule_update_friends_of_profile_info(profile_info):
    """If profile_info is human user ==> update friends and services of human_user
    If profile_info is service_identity ==> update human friendMaps of service_identity"""
    if profile_info.isServiceIdentity:
        service_profile = get_service_profile(profile_info.service_user, not db.is_in_transaction())
        if not _must_continue_with_update_service(service_profile):
            return

    worker_queue = get_current_queue() or HIGH_LOAD_WORKER_QUEUE
    deferred.defer(_run_update_friends_by_profile_info, profile_info.key(),
                   worker_queue=worker_queue,
                   _transactional=db.is_in_transaction(),
                   _queue=worker_queue)
def schedule_update_a_friend_of_a_service_identity_user(service_identity_user, target_user, force=False,
                                                        clear_broadcast_settings_cache=False):
    '''Schedule update of 1 service_identity to 1 user'''
    azzert('/' in service_identity_user.email(), "Expecting a service identity user.")
    service_user = get_service_user_from_service_identity_user(service_identity_user)
    if db.is_in_transaction():
        service_profile_or_service_user = get_service_profile(service_user, False)
    else:
        service_profile_or_service_user = service_user
    if not force and not _must_continue_with_update_service(service_profile_or_service_user, False,
                                                            clear_broadcast_settings_cache):
        return
    deferred.defer(_run_update_friends_for_service_user, service_user, force, False, clear_broadcast_settings_cache,
                   get_one_friend_service_identity_connection_keys_query, [service_identity_user, target_user],
                   _transactional=db.is_in_transaction())
def ensure_in_transaction(func, *args, **kwargs):
    """ Runs the specified method in a transaction, if the current thread is
    not currently running in a transaction already.

    However, if we're running as part of the remote-api service, do
    *not* run in a transaction, since remote-api does not support
    transactions well (in particular, you can't do any queries while
    inside a transaction).  The remote-api shell marks itself in the
    SERVER_SOFTWARE environment variable; other remote-api users
    should do similarly.

    Arguments:
       func: the function to run in transaction
       *args, **kwargs: the args/kwargs to pass to func, with the
          exception of:
       xg_on: if True allow XG transactions (which are disallowed by default)
    """
    xg_on = kwargs.pop('xg_on', None)

    if db.is_in_transaction() or 'remote' in os.environ.get("SERVER_SOFTWARE", ""):
        return func(*args, **kwargs)

    if xg_on is not None:
        options = db.create_transaction_options(xg=xg_on)
        return db.run_in_transaction_options(options, func, *args, **kwargs)
    else:
        return db.run_in_transaction(func, *args, **kwargs)
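# Usage sketch for ensure_in_transaction (the model and field names are
# illustrative assumptions, not from the source):
from google.appengine.ext import db

def _bump_version(profile_key):
    profile = db.get(profile_key)
    profile.version += 1
    profile.put()

# Runs in a fresh transaction unless one is already open (or under remote-api):
# ensure_in_transaction(_bump_version, profile_key, xg_on=True)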
Example #6
    def _update(cls, name, sequence_num, status_code, output):
        """Updates job state in a datastore."""
        assert db.is_in_transaction()

        job = DurableJobEntity._get_by_name(name)
        if not job:
            logging.error('Job was not started or was deleted: %s', name)
            return
        if job.sequence_num != sequence_num:
            logging.warning(
                'Request to update status code to %d for sequence number %d '
                'but job is already on run %d',
                status_code, sequence_num, job.sequence_num)
            return
        now = datetime.datetime.utcnow()
        if status_code in (STATUS_CODE_STARTED, STATUS_CODE_QUEUED):
            job.execution_time_sec = 0
        else:
            job.execution_time_sec += long(
                (now - job.updated_on).total_seconds())
        job.updated_on = now
        job.status_code = status_code
        if output:
            job.output = output
        job.put()
def send_message(user, type_, skip_dashboard_user=True, service_identity=None, **kwargs):
    if u"SERVER_SOFTWARE" in os.environ:
        targets = []
        connected_client_keys = []
        if isinstance(type_, list):
            data = json.dumps(type_)
        else:
            data = get_data(type_, **kwargs)

        for u in (user if isinstance(user, (set, list, tuple)) else [user]):
            if skip_dashboard_user and u == MC_DASHBOARD:
                continue
            offload(u, OFFLOAD_TYPE_WEB_CHANNEL, data, None, type_)
            connected_client_keys.append(ConnectedChannelClients.create_key(u))
            targets.append(u.email())

        for ccc in db.get(connected_client_keys):
            if ccc:
                if service_identity and ccc.identities:
                    for i in xrange(len(ccc.ids)):
                        if service_identity == ccc.identities[i] or (service_identity == "+default+" and ccc.identities[i] == ""):
                            targets.append(ccc.ids[i])
                else:
                    targets.extend(ccc.ids)

        for email_or_client_id in targets:
            if db.is_in_transaction():
                on_trans_committed(try_or_defer, _send_message, email_or_client_id, data)
            else:
                try_or_defer(_send_message, email_or_client_id, data)
        return True
def send_message_to_session(service_user, session, type_, **kwargs):
    ccc = db.get(ConnectedChannelClients.create_key(service_user))
    if ccc:
        if isinstance(type_, list):
            data = json.dumps(type_)
        else:
            data = get_data(type_, **kwargs)

        offload(service_user, OFFLOAD_TYPE_WEB_CHANNEL, data, None, type_)

        s = StringIO()
        s.write('b-')
        s.write(base64.b32encode(session.key().name().decode('hex')).replace("=", ""))
        s.write('|')
        channel_id = s.getvalue()

        targets = []
        for client_id in ccc.ids:
            if client_id.startswith(channel_id):
                targets.append(client_id)

        for client_id in targets:
            if db.is_in_transaction():
                on_trans_committed(try_or_defer, _send_message, client_id, data)
            else:
                try_or_defer(_send_message, client_id, data)
def start(job_config=None,
          in_xg_transaction=False):
  """Start a new map job.

  Args:
    job_config: an instance of map_job.MapJobConfig.
    in_xg_transaction: controls what transaction scope to use to start this MR
      job. If True, there has to be an already opened cross-group transaction
      scope. MR will use one entity group from it.
      If False, MR will create an independent transaction to start the job
      regardless of any existing transaction scopes.

  Returns:
    the id of this map job.

  Raises:
    ValueError: when in_xg_transaction is True but no transaction scope is
      detected.
  """
  if in_xg_transaction and not db.is_in_transaction():
    raise ValueError("Expects an opened xg transaction to start mapreduce.")

  # Break circular dependency.
  # pylint: disable=g-import-not-at-top
  from mapreduce import handlers

  return handlers.StartJobHandler._start_map(
      name=job_config.job_name,
      mapper_spec=job_config._get_mapper_spec(),
      mapreduce_params=job_config._get_mr_params(),
      queue_name=job_config.queue_name,
      hooks_class_name=util._obj_to_path(job_config._hooks_cls),
      _app=job_config._app,
      in_xg_transaction=in_xg_transaction)
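# Usage sketch: starting a map job atomically with other writes, inside an
# existing cross-group transaction scope. The job_config construction is an
# assumption based on the docstring, not verified against the mapreduce API.
from google.appengine.ext import db

def _txn(job_config):
    # ... put() entities that must commit together with the job start ...
    return start(job_config=job_config, in_xg_transaction=True)

# xg_opts = db.create_transaction_options(xg=True)
# job_id = db.run_in_transaction_options(xg_opts, _txn, job_config)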
def _reschedule_post_to_social_media(ssb):
    countdown = ssb.timestamp - now()
    if countdown >= 0:
        logging.debug('Rescheduling post to social media to run in %d seconds', countdown)
        deferred.defer(post_to_social_media_scheduled, ssb.key_str,
                       _countdown=countdown, _queue=SCHEDULED_QUEUE,
                       _transactional=db.is_in_transaction())
    def __setitem__(self, key, value):
        azzert(db.is_in_transaction())
        azzert(isinstance(key, unicode) and len(key) <= MAX_KEY_LENGTH)

        if key in self._keys:
            existing_type = 'simple'
        elif key in self._blob_keys:
            existing_type = 'blob'
        else:
            existing_type = 'new'

        # check the type of value. if StringIO then BlobBucket, else Bucket
        if isinstance(value, STRING_IO_TYPES):
            if existing_type == 'simple':
                del self[key]
            self._set_blob(key, value)
        else:
            # If value is a str or unicode, check if the length is less than MAX_STRING_PROPERTY_LENGTH
            azzert(not isinstance(value, basestring) or len(value) <= MAX_STRING_PROPERTY_LENGTH)
            azzert(self._is_simple_type(value))
            if key.startswith('_'):  # db.Expando doesn't save properties starting with an underscore
                raise InvalidKeyError('Invalid key: %s' % key, key)
            if existing_type == 'blob':
                del self[key]
            self._set_simple_value(key, value)
Example #12
def start_flow(service_identity_user, message_parent_key, flow, members, check_friends, result_callback, tag=None,
               context=None, key=None, force_language=None, allow_reserved_tag=False, broadcast_type=None,
               broadcast_guid=None, flow_params=None):
    # key is in fact a force_message_flow_run_id
    svc_user = get_service_user_from_service_identity_user(service_identity_user)

    if isinstance(flow, basestring):
        mfd = get_message_flow_by_key_or_name(svc_user, flow)
        if not mfd or mfd.user != svc_user:
            raise MessageFlowNotFoundException()

        if mfd.status != MessageFlowDesign.STATUS_VALID:
            raise MessageFlowNotValidException(mfd.validation_error)
    else:
        mfd = flow

    _validate_start_flow(service_identity_user, message_parent_key, members, check_friends, tag, allow_reserved_tag,
                         flow_params)
    mfr = _create_message_flow_run(svc_user, service_identity_user, key, result_callback, tag, flow_params)
    message_flow_run_id = mfr.messageFlowRunId

    d = hashlib.sha256(message_flow_run_id)
    d.update('key for first message in flow!')

    try_or_defer(_execute_flow, service_identity_user, mfd, mfr, members, message_parent_key, context, d.hexdigest(),
                 force_language=force_language, broadcast_type=broadcast_type, broadcast_guid=broadcast_guid, tag=tag,
                 _transactional=db.is_in_transaction())

    return message_flow_run_id
def _send_broadcast_to_test_persons(broadcast):
    testers_to_find = list(broadcast.test_persons)
    si_mapped_to_testers = dict()
    for si in ServiceIdentity.all().ancestor(parent_key(broadcast.service_user)).run():
        keys = [FriendServiceIdentityConnection.createKey(u, si.user) for u in testers_to_find]
        for fsic in db.get(keys):
            if fsic and not fsic.deleted:
                if si not in si_mapped_to_testers:
                    si_mapped_to_testers[si] = list()
                si_mapped_to_testers[si].append(fsic.friend)
                testers_to_find.remove(fsic.friend)
            if not testers_to_find:
                break
        if not testers_to_find:
            break
    bizz_check(not testers_to_find,
               u"Could not find a connected service identity for %s" % [x.email() for x in testers_to_find])
    mfd = MessageFlowDesign.get(broadcast.message_flow)

    # Make sure all end modules are connected with a flush
    new_xml = _check_flow_end_modules(mfd)
    if new_xml:
        mfd = CustomMessageFlowDesign()
        mfd.xml = new_xml

    for si, testers in si_mapped_to_testers.iteritems():
        deferred.defer(start_flow, si.user, message_parent_key=None, flow=mfd, members=testers,
                       check_friends=False, result_callback=False,
                       tag=json.dumps({Broadcast.TAG_MC_BROADCAST: unicode(broadcast.key()),
                                       '%s.tag' % MC_RESERVED_TAG_PREFIX: broadcast.tag}),
                       _transactional=db.is_in_transaction(), broadcast_type=broadcast.type_)
Example #14
    def enqueue(domain_identifier, task_identifier, transactional=False):
        """
        Queues a new worker to update the task hierarchy of the task
        with the given identifier.

        Args:
            domain_identifier: The domain identifier string
            task_identifier: The task identifier string
            transactional: If set to true, then the task will be added
                as a transactional task.

        Raises:
            ValueError: If transactional is set to True and the
                 function is not called as part of a transaction.
        """
        if transactional and not db.is_in_transaction():
            raise ValueError("Adding a transactional worker requires a"
                             " transaction")

        queue = taskqueue.Queue('update-task-hierarchy')
        task = taskqueue.Task(url='/workers/update-task-hierarchy',
                              params={ 'task': task_identifier,
                                       'domain': domain_identifier })
        try:
            queue.add(task, transactional=transactional)
        except taskqueue.TransientError:
            queue.add(task, transactional=transactional)
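# Usage sketch: pairing the worker enqueue with a datastore write so both
# commit or roll back together (`_save_and_enqueue` is an illustrative name):
from google.appengine.ext import db

def _save_and_enqueue(task_entity, domain_identifier, task_identifier):
    task_entity.put()
    enqueue(domain_identifier, task_identifier, transactional=True)

# db.run_in_transaction(_save_and_enqueue, task_entity, domain_id, task_id)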
def grant_role(service_identity_user, user, role):
    admin = False
    service_identity = get_service_identity(service_identity_user)
    if not service_identity:
        raise ServiceIdentityDoesNotExistException(get_identity_from_service_identity_user(service_identity_user))
    if isinstance(role, unicode):
        admin = True
        azzert(role in ROLES)  # builtin
    else:
        azzert(role.service_user == service_identity.service_user)
        role = u'sr:%s' % role.role_id

    def trans():
        user_profile = get_user_profile(user, False)
        bizz_check(user_profile, "User %s does not exist" % user.email())
        user_profile.grant_role(service_identity_user, role)
        user_profile.put()

    if db.is_in_transaction():
        trans()
    else:
        db.run_in_transaction(trans)

    if admin:
        _send_admin_role_updates(service_identity_user)
Example #16
def ret_existing_gid_item(group_name, value, ref_object_key=None):
    logging.info("ret_existing_gid_item begin value = %d" % value)
    now = datetime.datetime.utcnow()

    def txn():
        logging.info("transaction begin")
        gg = gid_group_get_or_create(group_name)
        gi = GidItem.get_by_key_name(str(value), parent=gg)
        logging.info("group and item ready")
        gi.usage = gi.usage - 1
        if gi.usage == 0:
            logging.info("item away")
            gi.away = True
            gg.group_away_count = gg.group_away_count + 1
            va = gg.group_away_count
            logging.info("new group_away_count %s" % str(va))
            gg.update_ts = now
        elif gi.usage == 1:
            logging.info("item no dupl")
            gg.group_dupl_count = gg.group_dupl_count - 1
            logging.info("new group_dupl_count %s" % str(gg.group_dupl_count))
            gg.update_ts = now
        logging.info("transaction final commit")
        gi.save()
        gg.save()
        put_new_item_log(gi, "ret", now, ref_object_key, "final usage is %d" % gi.usage)

        logging.info("transaction end")
        return gi.usage

    if db.is_in_transaction():
        logging.info("already inside a transaction; running directly")
        return txn()
    return db.run_in_transaction_options(xg_on, txn)
def assemble_data(from_, to, stash, cursor, dry_run=True):
    key = '%s-%s' % (from_, to)
    dsp = DSPickler.read(key)
    if stash is None:
        stash = dsp.data
    logging.info("Looking for unread messages in the following time frame:\nfrom_: %s\nto: %s",
                 time.ctime(from_), time.ctime(to))
    query = GET_UNREAD_MESSAGES_JOB_GQL()
    query.bind(from_=from_, to=to)
    query.with_cursor(cursor)
    ums = query.fetch(100)
    ums_length = len(ums)
    for um in ums:
        if not um:
            logging.info("Skipped stale record.")
            continue
        if not (from_ < um.creationTimestamp <= to):
            logging.info("Skipped out-dated query result of %s with timestamp %s" % (um.mkey, time.ctime(um.creationTimestamp / MICRO_MULTIPLIER)))
            continue
        actors = [member.email() for member in um.members
                  if Message.statusIndexValue(member, Message.MEMBER_INDEX_STATUS_NOT_RECEIVED) in um.member_status_index
                  and Message.statusIndexValue(member, Message.MEMBER_INDEX_STATUS_NOT_DELETED) in um.member_status_index]
        for actor in actors:
            if actor not in stash:
                stash[actor] = [0, set()]
            stats = stash[actor]
            stats[0] = stats[0] + 1
            stats[1].add((um.sender, um.message, um.broadcast_type, um.creationTimestamp))
    if ums_length > 0:
        dsp.update(stash)
        deferred.defer(assemble_data, from_, to, None, query.cursor(), dry_run, _transactional=db.is_in_transaction())
    else:
        dsp.update((stash.keys(), stash.items()))
        deferred.defer(schedule_send_email, from_, to, 0, dry_run, _transactional=db.is_in_transaction())
Example #19
def insert_suppression_results(step, suppression_results):
    """Inserts GTest results into the datastore, replacing any existing ones.
    Also records used parser version. Must be used inside a transaction."""
    assert db.is_in_transaction()

    old_parser_version = step.suppression_parser_version

    delete_suppression_results(step.key())

    # Record the parser version used for stored results.
    step.suppression_parser_version = suppression_parser.VERSION
    step.put()

    # Insert new results.
    to_put = []
    for suppression_name in suppression_results:
        to_put.append(
            MemorySuppressionResult(
                parent=step,
                build_step=step,
                step_name=step.step_name,
                time_finished=step.time_finished,
                suppression_parser_version=step.suppression_parser_version,
                name=suppression_name))
    db.put(to_put)

    # Only update summaries for completely new results.
    if old_parser_version == -1:
        for chunk in chunks(to_put, BATCH_SIZE):
            deferred.defer(update_suppression_summaries,
                           [r.key() for r in chunk],
                           _queue='gtest-summaries')
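# Usage sketch: insert_suppression_results asserts it runs inside a
# transaction, so callers wrap it (step_key/results are illustrative names):
from google.appengine.ext import db

def _replace_results(step_key, results):
    step = db.get(step_key)  # re-fetch inside the transaction
    insert_suppression_results(step, results)

# db.run_in_transaction(_replace_results, step_key, results)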
Example #20
    def _update(cls, name, sequence_num, status_code, output):
        """Updates job state in a datastore."""
        assert db.is_in_transaction()

        job = DurableJobEntity._get_by_name(name)
        if not job:
            logging.error('Job was not started or was deleted: %s', name)
            return
        if job.sequence_num != sequence_num:
            logging.warning(
                'Request to update status code to %d for sequence number %d '
                'but job is already on run %d',
                status_code, sequence_num, job.sequence_num)
            return
        now = datetime.datetime.utcnow()
        if status_code == STATUS_CODE_QUEUED:
            job.execution_time_sec = 0
        else:
            job.execution_time_sec += long(
                (now - job.updated_on).total_seconds())
        job.updated_on = now
        job.status_code = status_code
        if output:
            job.output = output
        job.put()
def _must_continue_with_update_service(service_profile_or_user, bump_service_version=False,
                                       clear_broadcast_settings_cache=False):
    def trans(service_profile):
        azzert(service_profile)
        service_profile_updated = False
        if not service_profile.autoUpdating and not service_profile.updatesPending:
            service_profile.updatesPending = True
            service_profile_updated = True
        if bump_service_version:
            service_profile.version += 1
            service_profile_updated = True
        if clear_broadcast_settings_cache:
            service_profile.addFlag(ServiceProfile.FLAG_CLEAR_BROADCAST_SETTINGS_CACHE)
            service_profile_updated = True

        if service_profile_updated:
            channel.send_message(service_profile.user, 'rogerthat.service.updatesPendingChanged',
                                 updatesPending=service_profile.updatesPending)
            service_profile.put()

        return service_profile.autoUpdating

    is_user = not isinstance(service_profile_or_user, ServiceProfile)
    if db.is_in_transaction():
        azzert(not is_user)
        service_profile = service_profile_or_user
        auto_updating = trans(service_profile_or_user)
    else:
        service_profile = get_service_profile(service_profile_or_user, False) if is_user else service_profile_or_user
        auto_updating = db.run_in_transaction(trans, service_profile)

    if not auto_updating:
        logging.info("Auto-updates for %s are suspended." % service_profile.user.email())
    return auto_updating
def delete_service_role(service_user, role_id, cleanup_members=False):
    service_role = get_service_role_by_id(service_user, role_id)
    if not service_role:
        raise RoleNotFoundException(role_id)

    if ServiceMenuDef.all().ancestor(parent_key(service_user)).filter('roles =', role_id).count(1):
        raise DeleteRoleFailedHasSMDException(role_id)

    if cleanup_members:
        service_identity_user = create_service_identity_user(service_user)
        role = u'%s' % role_id
        for srg in get_service_role_grants(service_user, role_id):
            app_user = create_app_user_by_email(srg.user_email, srg.app_id)

            def trans():
                user_profile = get_user_profile(app_user, False)
                user_profile.revoke_role(service_identity_user, role)
                user_profile.put()

            if db.is_in_transaction():
                trans()
            else:
                db.run_in_transaction(trans)
    else:
        has_grants = any(get_service_role_grants(service_user, role_id))
        if has_grants:
            raise DeleteRoleFailedHasMembersException(role_id)

    db.delete(service_role)
    _send_service_role_updates(service_user)
Example #24
    def get_overrides(cls, force_update=False):
        """Returns current property overrides, maybe cached."""

        now = long(time.time())
        age = now - cls.last_update_time
        max_age = UPDATE_INTERVAL_SEC.get_value(db_overrides=cls.db_overrides)

        # do not update if call is reentrant or outer db transaction exists
        busy = hasattr(cls.threadlocal, cls.REENTRY_ATTR_NAME) or (db.is_in_transaction())

        if (not busy) and (force_update or age < 0 or age >= max_age):
            # Value of '0' disables all datastore overrides.
            if UPDATE_INTERVAL_SEC.get_value() == 0:
                cls.db_overrides = {}
                return cls.db_overrides

            # Load overrides from a datastore.
            setattr(cls.threadlocal, cls.REENTRY_ATTR_NAME, True)
            try:
                old_namespace = namespace_manager.get_namespace()
                try:
                    namespace_manager.set_namespace(appengine_config.DEFAULT_NAMESPACE_NAME)
                    cls._load_from_db()
                finally:
                    namespace_manager.set_namespace(old_namespace)
            except Exception as e:  # pylint: disable=broad-except
                logging.error("Failed to load properties from a database: %s.", str(e))
            finally:
                delattr(cls.threadlocal, cls.REENTRY_ATTR_NAME)

                # Avoid overload and update timestamp even if we failed.
                cls.last_update_time = now
                cls.update_index += 1

        return cls.db_overrides
Example #25
def create_new_gid_item(group_name, ref_object_key=None):
    logging.info("create_new_gid_item begin")
    now = datetime.datetime.utcnow()

    def txn():
        logging.info("transaction begin")
        gg = gid_group_get_or_create(group_name)
        logging.info("group ready")
        gg.group_max_value = gg.group_max_value + 1
        gg.group_new_count = gg.group_new_count + 1
        gg.update_ts = now
        value = gg.group_max_value
        logging.info("new value %s" % (str(value)))
        gi = GidItem(parent=gg, key_name=str(value))
        gi.usage = 1
        gi.create_ts = now
        logging.info("transaction final commit")
        gi.put()
        gg.put()
        put_new_item_log(gi, "new", now, ref_object_key, None)
        logging.info("transaction end")
        return value

    if db.is_in_transaction():
        return txn()

    return db.run_in_transaction_options(xg_on, txn)
def revoke_role(service_identity_user, user, role):
    admin = False
    service_identity = get_service_identity(service_identity_user)
    if not service_identity:
        raise ServiceIdentityDoesNotExistException(get_identity_from_service_identity_user(service_identity_user))
    if isinstance(role, unicode):
        admin = True
        azzert(role in ROLES)  # builtin
    else:
        azzert(role.service_user == service_identity.service_user)
        role = u'sr:%s' % role.role_id

    def trans():
        user_profile = get_user_profile(user, False)
        user_profile.revoke_role(service_identity_user, role)
        user_profile.put()
        if admin:
            deferred.defer(drop_sessions_of_user, user, _transactional=True)

    if db.is_in_transaction():
        trans()
    else:
        db.run_in_transaction(trans)

    if admin:
        _send_admin_role_updates(service_identity_user)
    def index(self, obj, fields_to_index, defer_index=True):
        """ Index an object. Will defer the indexing if defer_index is true or if called inside a transaction.
            Indexing an object will always unindex the object first.
        """
        if db.is_in_transaction() or defer_index:
            defer(self.reindex, obj, fields_to_index, defer_index=defer_index, _queue=QUEUE_FOR_INDEXING)
        else:
            self.reindex(obj, fields_to_index, defer_index=defer_index)
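# Usage sketch (`indexer` and `contact` are illustrative assumptions): even
# with defer_index=False, indexing from inside a transaction still goes
# through the queue, per the guard above.
# indexer.index(contact, ['name', 'email'], defer_index=False)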
def send_message_to_session(service_user, session, type_, **kwargs):
    data = get_data_as_json(type_, **kwargs)
    offload(service_user, OFFLOAD_TYPE_WEB_CHANNEL, data, None, type_)

    if db.is_in_transaction():
        on_trans_committed(try_or_defer, _send_message, service_user, data, session)
    else:
        try_or_defer(_send_message, service_user, data, session)
def get_app(app_id):
    def trans():
        app = App.get(App.create_key(app_id))
        if not app:
            raise AppDoesNotExistException(app_id)
        return app

    return trans() if db.is_in_transaction() else db.run_in_transaction(trans)
    def _trigger_invalidate_cache(self):
        def invalidate_cache():
            self.invalidateCache()
            logging.info("%s: Cache invalidated", self.__class__.__name__)

        if db.is_in_transaction() and self.on_trans_committed:
            self.on_trans_committed(invalidate_cache)
        else:
            invalidate_cache()
def _reschedule_broadcast_message(ssb):
    service_user = ssb.service_user
    sln_settings = get_solution_settings(service_user)
    countdown = (ssb.broadcast_epoch + timezone_offset(sln_settings.timezone)) - now()
    if countdown >= 0:
        logging.debug('Rescheduling broadcast message to run in %d seconds', countdown)
        deferred.defer(_send_scheduled_broadcast, service_user, ssb.key_str,
                       _countdown=countdown, _queue=SCHEDULED_QUEUE,
                       _transactional=db.is_in_transaction())
def send_mail_via_mime(from_, to, mime, transactional=None):
    try:
        azzert(to)
    except:
        logging.exception('There were no recipients. Not sending out the email.', _suppress=False)
        return
    if transactional is None:
        transactional = db.is_in_transaction()
    deferred.defer(_send_mail_via_mime, from_, to, mime, _transactional=transactional, _queue=FAST_QUEUE)
Example #33
def create_or_update_solution_service(
        solution,
        email,
        name,
        branding_url,
        menu_item_color,
        address,
        phone_number,
        languages,
        currency,
        redeploy,
        organization_type=OrganizationType.PROFIT,
        modules=None,
        broadcast_types=None,
        apps=None,
        owner_user_email=None,
        search_enabled=False,
        qualified_identifier=None,
        broadcast_to_users=None):
    if not redeploy:
        password, sln_settings = \
            create_solution_service(email, name, branding_url, menu_item_color, address, phone_number,
                                    solution, languages, currency, organization_type=organization_type, modules=modules,
                                    broadcast_types=broadcast_types, apps=apps, owner_user_email=owner_user_email,
                                    search_enabled=search_enabled)
        service_user = sln_settings.service_user
    else:
        service_user = users.User(email)
        sln_settings = update_solution_service(
            service_user,
            branding_url,
            menu_item_color,
            solution,
            languages,
            currency,
            modules=modules,
            broadcast_types=broadcast_types,
            apps=apps,
            organization_type=organization_type,
            name=name,
            address=address,
            phone_number=phone_number,
            qualified_identifier=qualified_identifier)
        password = None

    deferred.defer(common_provision,
                   service_user,
                   broadcast_to_users=broadcast_to_users,
                   _transactional=db.is_in_transaction(),
                   _queue=FAST_QUEUE)

    resp = ProvisionResponseTO()
    resp.login = email
    resp.password = password
    resp.auto_login_url = generate_auto_login_url(service_user)
    return resp
Example #34
    def pre_save(self, model_instance, add):
        # Prevent changes outside of a transaction
        if model_instance.id:
            old_value = getattr(self.model.objects.get(id=model_instance.id), self.attname)
            new_value = getattr(model_instance, self.attname)
            if old_value != new_value:
                if not is_in_transaction():
                    raise TransactionError(
                        "%s updates must use transactions (old_val: %d, new_val: %d)"
                        % (self.attname, old_value, new_value))

        return super(TransactionalField, self).pre_save(model_instance, add)
def update_beacons(service_identity_user, beacons):
    from rogerthat.bizz.service import BeaconAlreadyCoupledException, NoBeaconRegionFoundException

    if beacons is MISSING or beacons is None:
        beacons = list()

    service_user = get_service_user_from_service_identity_user(service_identity_user)
    identifier = get_identity_from_service_identity_user(service_identity_user)

    @db.non_transactional
    def _validate_beacon(beacon_name, beacon_uuid, beacon_major, beacon_minor, supported_app_ids):
        b = get_beacon(beacon_uuid, beacon_name)
        if b and b.service_identity_user != service_identity_user:
            raise BeaconAlreadyCoupledException(beacon_uuid, beacon_major, beacon_minor)

        for beacon_region in BeaconRegion.all().filter('uuid', beacon_uuid):
            if beacon_region.major is not None:
                if beacon_region.major != beacon_major:
                    continue  # the beacon does not belong to this region

                if beacon_region.minor is not None:
                    if beacon_region.minor != beacon_minor:
                        continue  # the beacon does not belong to this region

            if beacon_region.app_id not in supported_app_ids:
                continue  # the service_identity does not support this app

            break
        else:
            raise NoBeaconRegionFoundException(beacon_uuid, beacon_major, beacon_minor)


    def trans():
        l = get_beacons(service_user, identifier)
        db.delete(l)
        beacons_to_put = list()
        for beacon in beacons:
            beacon_name = u"%s|%s" % (beacon.major, beacon.minor)
            beacon_uuid = beacon.uuid.lower()
            _validate_beacon(beacon_name, beacon_uuid, beacon.major, beacon.minor,
                             get_service_identity(service_identity_user).appIds)
            b = Beacon(key=Beacon.create_key(service_identity_user, beacon_name))
            b.uuid = beacon_uuid
            b.name = beacon_name
            b.tag = beacon.tag
            b.service_identity = identifier
            b.creation_time = now()
            beacons_to_put.append(b)
        if beacons_to_put:
            db.put(beacons_to_put)

    if db.is_in_transaction():
        return trans()
    else:
        return db.run_in_transaction(trans)
def get_profile_infos(users_, update_request_cache=True, update_mem_cache=True, allow_none_in_results=False,
                      expected_types=None):
    if db.is_in_transaction():
        r = _get_profile_infos_not_cached(users_)
        if not allow_none_in_results:
            azzert(None not in r, "There is no ProfileInfo for %s" % [k for k, v in zip(users_, r) if v is None])
        _validate_profile_info_types(expected_types, r)
        return r

    profile_infos = dict()
    # First try request cache
    cache_misses = False
    for user in users_:
        result = get_from_request_cache(_get_profile.cache_key(user)) if '/' not in user.email() else MISSING
        if result == MISSING or not result[0] or isinstance(result[1], ServiceProfile):
            # Lets try the ServiceIdentity cache
            f = get_service_identity
            result = get_from_request_cache(f.cache_key(user))
        if result == MISSING or not result[0]:
            cache_misses = True
            profile_infos[user] = None
        else:
            profile_infos[user] = result[1]
    if not cache_misses:
        r = [profile_infos[u] for u in users_]  # Keep original order
        if not allow_none_in_results:
            azzert(None not in r)
        _validate_profile_info_types(expected_types, r)
        return r

    remaining_profile_infos_to_get = [user for user, profile_info in profile_infos.iteritems() if profile_info is None]
    if len(remaining_profile_infos_to_get) == 0:
        r = [profile_infos[u] for u in users_]  # Keep original order
        _validate_profile_info_types(expected_types, r)
        return r

    remaining_profile_infos = _get_profile_infos_not_cached(remaining_profile_infos_to_get)
    for i in xrange(len(remaining_profile_infos_to_get)):
        profile_info = remaining_profile_infos[i]
        if profile_info is None:
            continue
        profile_infos[remaining_profile_infos_to_get[i]] = profile_info
        if update_request_cache:
            f = get_service_identity if profile_info.isServiceIdentity else _get_profile
            cache_key = f.cache_key(profile_info.user)
            add_to_request_cache(cache_key, True, profile_info)
            if profile_info.isServiceIdentity and profile_info.is_default:
                cache_key2 = f.cache_key(profile_info.service_user)
                add_to_request_cache(cache_key2, True, profile_info)

    r = [profile_infos[u] for u in users_]  # Keep original order
    if not allow_none_in_results:
        azzert(None not in r)
    _validate_profile_info_types(expected_types, r)
    return r
Example #37
def _get_job(job_key, phase=None):
    def trans():
        job = db.get(job_key)

        bizz_check(job, "Job with key %s not found!" % job_key)
        if phase is not None:
            bizz_check(job.phase == phase,
                       "Expected job %s to be in phase %s, but the phase was %s" % (job_key, phase, job.phase))
        return job

    return trans() if db.is_in_transaction() else db.run_in_transaction(trans)
Example #38
    def _create_job(cls, name):
        """Creates new or reset a state of existing job in a datastore."""
        assert db.is_in_transaction()

        job = DurableJobEntity._get_by_name(name)
        if not job:
            job = DurableJobEntity(key_name=name)
        job.updated_on = datetime.now()
        job.execution_time_sec = 0
        job.status_code = STATUS_CODE_QUEUED
        job.output = None
        job.put()
Example #39
    def queue_worker(task, add_assignee=None, remove_assignee=None):
        """
        Queues a new worker to update the assignees of the given
        task.

        There are two arguments: add_assignee is used to add an
        assignee to a task and remove_assignee is used to remove one
        from the task. They can also be None if none is updated. They
        can also both be specified, to indicate a change in assignees.

        This function must be run as part of a transaction because
        this function increments the assignee sequence number of the
        tasks and queues a transactional task.

        If both assignee arguments are None, or are the same User
        instance, then this function will act as a no-op.

        Args:
            task: An instance of the Task model.
            add_assignee: The identifier string of the user that is added
                as assignee. Can be None.
            remove_assignee: The identifier string of the user that is
                removed as assignee. Can be None.

        Raises:
            ValueError: If this function is called outside a transaction.
        """
        if not add_assignee and not remove_assignee:
            return
        if (add_assignee and remove_assignee
                and add_assignee.identifier() == remove_assignee.identifier()):
            return

        if not db.is_in_transaction():
            raise ValueError("Requires a transaction")

        sequence = task.assignee_index_sequence
        task.assignee_index_sequence = sequence + 1
        task.put()
        queue = taskqueue.Queue('update-assignee-index')
        task = taskqueue.Task(url='/workers/update-assignee-index',
                              params={
                                  'task': task.identifier(),
                                  'domain': task.domain_identifier(),
                                  'sequence': sequence,
                                  'add_assignee': add_assignee,
                                  'remove_assignee': remove_assignee
                              })
        try:
            queue.add(task, transactional=True)
        except taskqueue.TransientError:
            queue.add(task, transactional=True)
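# Usage sketch: reassigning a task inside a transaction so the sequence bump
# and the worker enqueue commit atomically. Names are illustrative; the
# assignee arguments are assumed to be objects exposing identifier().
from google.appengine.ext import db

def _reassign(task_key, old_user, new_user):
    task = db.get(task_key)
    queue_worker(task, add_assignee=new_user, remove_assignee=old_user)

# db.run_in_transaction(_reassign, task_key, old_user, new_user)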
Example #41
    def _update(cls, name, status_code, output, execution_time_sec):
        """Updates job state in a datastore."""
        assert db.is_in_transaction()

        job = DurableJobEntity._get_by_name(name)
        if not job:
            logging.error('Job was not started or was deleted: %s', name)
            return
        job.updated_on = datetime.now()
        job.execution_time_sec = execution_time_sec
        job.status_code = status_code
        job.output = output
        job.put()
Example #42
    def save(self, *args, **kwargs):
        if not is_in_transaction():
            try:
                order = self.order
                self.dn_pickup = order.from_raw
                self.dn_dropoff = order.to_raw
                self.dn_pickup_time = order.depart_time
                self.passenger = order.passenger
                self.dn_passenger_name = order.passenger.name
            except Order.DoesNotExist:
                pass

        super(BillingTransaction, self).save(*args, **kwargs)
Example #43
def on_trans_committed(func, *args, **kwargs):
    """
    Executes func when the transaction the function is run in has completed.

    Args:
        func: Function to execute
        *args: Positional arguments for func
        **kwargs: Keyword arguments for func

    Notes:
        Does not return the function's return value.
    """
    azzert(db.is_in_transaction())
    post_transaction_actions.append(True, func, *args, **kwargs)
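# Usage sketch: deferring a side effect until the transaction commits
# (`_txn`, `record`, and the logged message are illustrative):
import logging
from google.appengine.ext import db

def _txn(record):
    record.put()
    # runs only if the transaction commits; the return value is discarded
    on_trans_committed(logging.info, "record %s committed", record.key())

# db.run_in_transaction(_txn, record)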
def _get_config(name):
    """ A safe way to do an atomic get_or_insert that can also be run
    inside of a transaction.
    """

    if db.is_in_transaction():
        config = ShardedCounterConfig.get_by_key_name(name)
        if not config:
            config = ShardedCounterConfig(key_name=name, name=name)
            config.put()
        return config
    else:
        return ShardedCounterConfig.get_or_insert(name, name=name)
Example #45
def _put_and_invalidate_cache_and_allocate_ids(*models):

    @db.non_transactional
    def _allocate_non_transactional():
        _allocate()

    def _allocate():
        from google.appengine.api import datastore
        datastore._GetConnection()._reserve_keys([m.key() for m in models if m.key().id() is not None])

    put_and_invalidate_cache(*models)
    if db.is_in_transaction():
        _allocate_non_transactional()
    else:
        _allocate()
Example #46
def schedule_post_to_social_media(service_user, host, on_facebook, on_twitter,
                                  facebook_access_token, news_id,
                                  scheduled_at):
    if scheduled_at < 1:
        return

    scheduled_broadcast = get_scheduled_broadcast(news_id,
                                                  service_user,
                                                  create=True)
    if scheduled_broadcast.timestamp == scheduled_at:
        return

    if on_facebook:
        if not facebook_access_token:
            if scheduled_broadcast.facebook_access_token:
                facebook_access_token = scheduled_broadcast.facebook_access_token
            else:
                raise ValueError(
                    'facebook access token is not provided, %s, news id: %d' %
                    (service_user, news_id))

        # try to extend facebook access token first
        try:
            facebook_access_token = facebook.extend_access_token(
                host, facebook_access_token)
        except Exception:
            logging.error('Cannot get an extended facebook access token',
                          exc_info=True)

    if scheduled_broadcast.scheduled_task_name:
        # remove the old scheduled task
        task_name = str(scheduled_broadcast.scheduled_task_name)
        taskqueue.Queue(SCHEDULED_QUEUE).delete_tasks_by_name(task_name)

    scheduled_broadcast.timestamp = scheduled_at
    scheduled_broadcast.broadcast_on_facebook = on_facebook
    scheduled_broadcast.broadcast_on_twitter = on_twitter
    scheduled_broadcast.facebook_access_token = facebook_access_token
    scheduled_broadcast.news_id = news_id

    task = deferred.defer(post_to_social_media_scheduled,
                          scheduled_broadcast.key_str,
                          _countdown=scheduled_at - now(),
                          _queue=SCHEDULED_QUEUE,
                          _transactional=db.is_in_transaction())

    scheduled_broadcast.scheduled_task_name = task.name
    scheduled_broadcast.put()
Example #47
def get_cityapp_profile(service_user):
    def trans():
        cityapp_profile = CityAppProfile.get(
            CityAppProfile.create_key(service_user))
        if not cityapp_profile:
            cityapp_profile = CityAppProfile(
                key=CityAppProfile.create_key(service_user))
            cityapp_profile.uitdatabank_enabled = False
            cityapp_profile.uitdatabank_key = None
            cityapp_profile.uitdatabank_region = None
            cityapp_profile.uitdatabank_regions = []
            cityapp_profile.gather_events_enabled = False
            cityapp_profile.put()
        return cityapp_profile

    return trans() if db.is_in_transaction() else db.run_in_transaction(trans)
Example #48
def run_in_transaction(function, xg=False, *args, **kwargs):
    """Runs specified function in a transaction.
    If called inside a transaction, the function is executed without creating a new transaction.

    Args:
        function: a function to be run inside the transaction on all remaining arguments
        xg: set to true to allow cross-group transactions (high replication datastore only)
        *args: Positional arguments for function.
        **kwargs: Keyword arguments for function.

    Returns:
        The function's return value, if any
    """
    if db.is_in_transaction():
        return function(*args, **kwargs)
    elif xg:
        transaction_options = db.create_transaction_options(xg=True)
        return db.run_in_transaction_options(transaction_options, function,
                                             *args, **kwargs)
    else:
        return db.run_in_transaction(function, *args, **kwargs)
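# Usage sketch: a cross-group transfer through the helper above
# (the Account entities and `balance` field are illustrative assumptions):
from google.appengine.ext import db

def _transfer(from_key, to_key, amount):
    src, dst = db.get([from_key, to_key])
    src.balance -= amount
    dst.balance += amount
    db.put([src, dst])

# Two entity groups, so xg=True is passed positionally:
# run_in_transaction(_transfer, True, from_key, to_key, 100)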
def add(name, n):
    '''Add n to the counter (n < 0 is valid)'''
    try:
        config = _get_config(name)

        def transaction():
            index = random.randint(0, config.num_shards - 1)
            shard_name = name + str(index)
            counter = ShardedCounter.get_by_key_name(shard_name)
            if counter is None:
                counter = ShardedCounter(key_name=shard_name, name=name)
            counter.count += n
            counter.put()

        if db.is_in_transaction():
            transaction()
        else:
            db.run_in_transaction(transaction)

    except Exception as e:
        logging.error("Error in add: %s", e)
    def _update(cls, name, sequence_num, status_code, output,
                execution_time_sec):
        """Updates job state in a datastore."""
        assert db.is_in_transaction()

        job = DurableJobEntity._get_by_name(name)
        if not job:
            logging.error('Job was not started or was deleted: %s', name)
            return
        if job.sequence_num != sequence_num:
            logging.warning(
                'Request to update status code to %d for sequence number %d '
                'but job is already on run %d',
                status_code, sequence_num, job.sequence_num)
            return
        job.updated_on = datetime.datetime.now()
        job.execution_time_sec = execution_time_sec
        job.status_code = status_code
        if output:
            job.output = output
        job.put()
Exemple #51
0
    def get_overrides(cls, force_update=False):
        """Returns current property overrides, maybe cached."""

        now = long(time.time())
        age = now - cls.last_update_time
        max_age = UPDATE_INTERVAL_SEC.get_value(db_overrides=cls.db_overrides)

        # do not update if call is reentrant or outer db transaction exists
        busy = hasattr(cls.threadlocal,
                       cls.REENTRY_ATTR_NAME) or (db.is_in_transaction())

        if (not busy) and (force_update or age < 0 or age >= max_age):
            # Value of '0' disables all datastore overrides.
            if UPDATE_INTERVAL_SEC.get_value() == 0:
                cls.db_overrides = {}
                return cls.db_overrides

            # Load overrides from a datastore.
            setattr(cls.threadlocal, cls.REENTRY_ATTR_NAME, True)
            try:
                old_namespace = namespace_manager.get_namespace()
                try:
                    namespace_manager.set_namespace(
                        appengine_config.DEFAULT_NAMESPACE_NAME)
                    cls._load_from_db()
                finally:
                    namespace_manager.set_namespace(old_namespace)
            except Exception as e:  # pylint: disable=broad-except
                logging.error('Failed to load properties from a database: %s.',
                              str(e))
            finally:
                delattr(cls.threadlocal, cls.REENTRY_ATTR_NAME)

                # Avoid overload and update timestamp even if we failed.
                cls.last_update_time = now
                cls.update_index += 1

        return cls.db_overrides
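The threadlocal busy flag is what keeps _load_from_db from re-entering itself through the get_value calls it makes. The same guard, reduced to a standalone sketch with hypothetical names:

import threading

_local = threading.local()

def load_guarded(loader):
    # Re-entrant call on this thread: bail out instead of recursing.
    if getattr(_local, 'loading', False):
        return None
    _local.loading = True
    try:
        return loader()
    finally:
        _local.loading = False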
Example #52
def _migrate_models(job, old_models, delete_old_models=True, put_new_models=True):
    azzert(db.is_in_transaction())
    new_models = list()

    for old_model in old_models:
        kwargs = copy_model_properties(old_model)

        # Patch props of (ServiceMenuDef, ServiceInteractionDef), (Broadcast), (ServiceProfile)
        for prop in ['staticFlowKey', 'message_flow', 'editableTranslationSet']:
            old_prop = kwargs.get(prop)
            if old_prop:
                kwargs[prop] = str(_create_new_key(job, db.Key(old_prop)))

        new_model = old_model.__class__(key=_create_new_key(job, old_model.key()),
                                        **kwargs)
        new_models.append(new_model)

    if put_new_models:
        _put_and_invalidate_cache_and_allocate_ids(*new_models)
    if delete_old_models:
        db.delete(old_models)

    return new_models
def change_number_of_shards(name, num):
    '''Change the number of shards to num'''
    try:
        config = _get_config(name)

        def transaction():
            if config.num_shards > num:
                for i in range(num, config.num_shards):
                    del_shard_name = name + str(i)
                    del_counter = ShardedCounter.get_by_key_name(
                        del_shard_name)
                    if del_counter is None:
                        # This shard was never written; nothing to fold in.
                        continue

                    keep_index = random.randint(0, num - 1)
                    keep_shard_name = name + str(keep_index)
                    keep_counter = ShardedCounter.get_by_key_name(
                        keep_shard_name)

                    if keep_counter is None:
                        keep_counter = ShardedCounter(key_name=keep_shard_name,
                                                      name=name)
                    keep_counter.count += del_counter.count

                    keep_counter.put()
                    del_counter.delete()

            # if num > num_shards, we don't have to do data transfer

            config.num_shards = num
            config.put()

        if db.is_in_transaction():
            transaction()
        else:
            db.run_in_transaction(transaction)

    except Exception as e:
        logging.error("Error in change_number_of_shards: %s", e)
Example #54
def getSpawnMailTaskTxn(context, parent=None, transactional=True):
    """Spawns a new Task that sends out an email with the given dictionary."""
    if not (context.get('to') or context.get('bcc')):
        context['body'] = context.get('body', '')[:10]
        logging.debug("Not sending email: '%s'", context)
        # no-one cares :(
        return lambda: None

    # TODO(daniel): drop this when DB models are not used anymore
    if not parent or isinstance(parent, db.Model):
        mail_entity = db_email_model.Email(context=json.dumps(context),
                                           parent=parent)
        transactional = db.is_in_transaction()
    else:
        mail_entity = ndb_email_model.Email(parent=parent.key,
                                            context=json.dumps(context))
        transactional = ndb.in_transaction()

    def txn():
        """Transaction to ensure that a task get enqueued for each mail stored.
    """
        mail_entity.put()

        if isinstance(mail_entity, db.Model):
            mail_entity_key = mail_entity.key()
        else:
            mail_entity_key = mail_entity.key.urlsafe()

        task_params = {'mail_key': str(mail_entity_key)}
        # Setting a countdown because the mail_entity might not be stored to
        # all the replicas yet.
        new_task = taskqueue.Task(params=task_params,
                                  url=SEND_MAIL_URL,
                                  countdown=5)
        new_task.add(queue_name='mail', transactional=transactional)

    return txn
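Note that the function returns the transaction body rather than running it, and that the transactional flag is captured when getSpawnMailTaskTxn is called, so build the txn in the same scope that will run it. A hedged usage sketch with illustrative context values:

context = {'to': 'someone@example.com',
           'subject': 'Welcome',
           'body': 'Hello there!'}
txn = getSpawnMailTaskTxn(context, parent=None)
db.run_in_transaction(txn)  # stores the Email entity and enqueues the task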
Example #55
  def create(kind, property_name, value, target_key=None):
    """Create a UniquePropertyRecord.

    If called from within a transactional, there is no attempt to verify that
    the given combo of key/property_name/value doesn't already exist. It is
    assumed that one calling this function from within a transactional is already
    verifying that the combo is unique.

    Args:
      (see make_key_name)
      target_key: optional db.Model subclass or key pointing at any entity
      transactional: optional bool, whether to create in a transaction (True)

    Returns:
      newly-created UniquePropertyRecord key or None

    Raises:
      AssertionError: if value is None and allow_none is False
      ValueError: if kind is not a string or db.Model subclass
    """
    assert value is not None

    called_from_transaction = db.is_in_transaction()

    def _create():
      if not called_from_transaction:
        existing_record = UniquePropertyRecord.retrieve(kind, property_name, value)
        if existing_record:
          raise UniquePropertyRecordExistsError(existing_record.key().name())
      key_name = UniquePropertyRecord.make_key_name(kind, property_name, value)
      return UniquePropertyRecord(key_name=key_name, target_key=target_key).put()

    if not called_from_transaction:
      return db.run_in_transaction(_create)
    else:
      return _create()
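A hedged usage sketch; the kind and property values are illustrative:

try:
  record_key = UniquePropertyRecord.create('User', 'email',
                                           'alice@example.com')
except UniquePropertyRecordExistsError:
  record_key = None  # that email address is already claimed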
def start_map(name,
              handler_spec,
              reader_spec,
              mapper_parameters,
              shard_count=None,
              output_writer_spec=None,
              mapreduce_parameters=None,
              base_path=None,
              queue_name=None,
              eta=None,
              countdown=None,
              hooks_class_name=None,
              _app=None,
              in_xg_transaction=False):
  """Start a new, mapper-only mapreduce.

  Args:
    name: mapreduce name. Used only for display purposes.
    handler_spec: fully qualified name of mapper handler function/class to call.
    reader_spec: fully qualified name of mapper reader to use
    mapper_parameters: dictionary of parameters to pass to mapper. These are
      mapper-specific and also used for reader initialization.
    shard_count: number of shards to create.
    mapreduce_parameters: dictionary of mapreduce parameters relevant to the
      whole job.
    base_path: base path of mapreduce library handler specified in app.yaml.
      "/mapreduce" by default.
    queue_name: taskqueue queue name to be used for mapreduce tasks.
      see util.get_queue_name.
    eta: absolute time when the MR should execute. May not be specified
      if 'countdown' is also supplied. This may be timezone-aware or
      timezone-naive.
    countdown: time in seconds into the future that this MR should execute.
      Defaults to zero.
    hooks_class_name: fully qualified name of a hooks.Hooks subclass.
    in_xg_transaction: controls what transaction scope to use to start this MR
      job. If True, there has to be an already opened cross-group transaction
      scope. MR will use one entity group from it.
      If False, MR will create an independent transaction to start the job
      regardless of any existing transaction scopes.

  Returns:
    mapreduce id as string.
  """
  if shard_count is None:
    shard_count = parameters.config.SHARD_COUNT
  if base_path is None:
    base_path = parameters.config.BASE_PATH

  if mapper_parameters:
    mapper_parameters = dict(mapper_parameters)
  if mapreduce_parameters:
    mapreduce_parameters = dict(mapreduce_parameters)
    if "base_path" not in mapreduce_parameters:
      mapreduce_parameters["base_path"] = base_path
  else:
    mapreduce_parameters = {"base_path": base_path}

  mapper_spec = model.MapperSpec(handler_spec,
                                 reader_spec,
                                 mapper_parameters,
                                 shard_count,
                                 output_writer_spec=output_writer_spec)

  if in_xg_transaction and not db.is_in_transaction():
    logging.warning("Expects an opened xg transaction to start mapreduce "
                    "when transactional is True.")

  return handlers.StartJobHandler._start_map(
      name,
      mapper_spec,
      mapreduce_parameters or {},
      base_path=base_path,
      queue_name=util.get_queue_name(queue_name),
      eta=eta,
      countdown=countdown,
      hooks_class_name=hooks_class_name,
      _app=_app,
      in_xg_transaction=in_xg_transaction)
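A hedged invocation sketch; the handler path and entity kind are hypothetical, while the reader spec uses the mapreduce library's stock DatastoreInputReader:

mapreduce_id = start_map(
    name='Reindex documents',
    handler_spec='myapp.jobs.reindex_document',  # hypothetical handler
    reader_spec='mapreduce.input_readers.DatastoreInputReader',
    mapper_parameters={'entity_kind': 'myapp.models.Document'},
    shard_count=8,
    queue_name='default')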
Example #57
    def trans():
        sln_settings = get_solution_settings(service_user)
        order_settings = get_solution_order_settings(sln_settings)
        lang = sln_settings.main_language
        comment = None
        phone = None
        takeaway_time = None
        if order_type == ORDER_TYPE_SIMPLE:
            details = get_extended_details_from_tag(
                _get_value(steps[0], u'message_details'))
            if steps[1].answer_id == u"positive":
                picture_url = _get_value(steps[1], u'message_picture')
                att = AttachmentTO()
                att.content_type = AttachmentTO.CONTENT_TYPE_IMG_JPG
                att.download_url = picture_url
                att.name = translate(lang, SOLUTION_COMMON, u'picture')
                att.size = 0
                attachments = [att]
            else:
                picture_url = None
                attachments = []
            phone = _get_value(steps[2], u'message_phone')
            msg = common_translate(lang, SOLUTION_COMMON,
                                   'if-order-received') % {
                                       'remarks': details,
                                       'phone_number': phone
                                   }

        elif order_type == ORDER_TYPE_ADVANCED:
            with closing(StringIO()) as order:
                timezone_offset = datetime.datetime.now(
                    pytz.timezone(
                        sln_settings.timezone)).utcoffset().total_seconds()
                has_order_items = False
                for step in steps:
                    if step.step_id == u'message_phone':
                        phone = step.get_value()
                    elif step.step_id == u'message_comment':
                        comment = step.get_value()
                    elif step.step_id == u'message_advanced_order':
                        step_value = step.display_value.encode('utf-8')
                        if step_value:
                            has_order_items = True
                        order.write(step_value)
                    elif step.step_id == u'message_takeaway_time':
                        takeaway_time = int(step.get_value() - timezone_offset)
                picture_url = None
                attachments = []
                if comment:
                    if has_order_items:
                        order.write('\n\n')
                    c = '%s: %s' % (common_translate(
                        lang, SOLUTION_COMMON, 'reservation-comment'), comment)
                    order.write(
                        c.encode('utf-8') if isinstance(c, unicode) else c)
                details = get_extended_details_from_tag(
                    order.getvalue().decode('utf-8'))
                takeaway_datetime = datetime.datetime.fromtimestamp(
                    takeaway_time, tz=get_timezone(sln_settings.timezone))
                takeaway_time_str = format_datetime(takeaway_datetime,
                                                    locale=lang,
                                                    format='d/M/yyyy H:mm')

                msg = '%s:\n%s\n%s: %s\n%s: %s' % (
                    common_translate(lang, SOLUTION_COMMON,
                                     'order_received'), details,
                    common_translate(lang, SOLUTION_COMMON,
                                     'phone_number'), phone,
                    common_translate(lang, SOLUTION_COMMON,
                                     'takeaway_time'), takeaway_time_str)
        else:
            raise BusinessException('Unsupported order type %s' % order_type)

        if not order_settings.manual_confirmation:
            # Waiting for follow-up message
            deferred.defer(_send_order_confirmation,
                           service_user,
                           lang,
                           message_flow_run_id,
                           member,
                           steps,
                           end_id,
                           end_message_flow_id,
                           parent_message_key,
                           tag,
                           result_key,
                           flush_id,
                           flush_message_flow_id,
                           service_identity,
                           user_details,
                           details,
                           _transactional=db.is_in_transaction())

        service_identity_user = create_service_identity_user_wo_default(
            service_user, service_identity)
        o = SolutionOrder(
            parent=parent_key_unsafe(service_identity_user, SOLUTION_COMMON))
        o.description = details
        o.phone_number = phone
        o.sender = SolutionUser.fromTO(user_details[0])
        o.timestamp = now()
        o.status = SolutionOrder.STATUS_RECEIVED
        o.picture_url = picture_url
        o.takeaway_time = takeaway_time
        o.user = user_details[0].toAppUser() if user_details else None

        message = create_solution_inbox_message(
            service_user, service_identity,
            SolutionInboxMessage.CATEGORY_ORDER, None, False, user_details,
            steps[2].received_timestamp, msg, True,
            [picture_url] if picture_url else [])
        o.solution_inbox_message_key = message.solution_inbox_message_key
        o.put()
        message.category_key = unicode(o.key())
        message.put()

        sln_i_settings = get_solution_settings_or_identity_settings(
            sln_settings, service_identity)
        sm_data = [{
            u"type": u"solutions.common.orders.update"
        }, {
            u"type":
            u"solutions.common.messaging.update",
            u"message":
            serialize_complex_value(
                SolutionInboxMessageTO.fromModel(message, sln_settings,
                                                 sln_i_settings, True),
                SolutionInboxMessageTO, False)
        }]
        send_message(service_user, sm_data, service_identity=service_identity)

        app_user = user_details[0].toAppUser()

        send_inbox_forwarders_message(
            service_user,
            service_identity,
            app_user,
            msg,
            dict(if_name=user_details[0].name, if_email=user_details[0].email),
            message_key=message.solution_inbox_message_key,
            attachments=attachments,
            reply_enabled=message.reply_enabled)
Example #58
def on_trans_rollbacked(func, *args, **kwargs):
    azzert(db.is_in_transaction())
    post_transaction_actions.append(False, func, *args, **kwargs)
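A hedged usage sketch; _reserve_slot and _release_slot are hypothetical helpers whose external side effect must be undone if the transaction fails:

def trans():
    reservation = _reserve_slot()  # hypothetical helper with a side effect
    # Registered action runs only if the surrounding transaction rolls back.
    on_trans_rollbacked(_release_slot, reservation.key())
    return reservation

db.run_in_transaction(trans)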
Example #59
def start_map(name,
              handler_spec,
              reader_spec,
              mapper_parameters,
              shard_count=None,
              output_writer_spec=None,
              mapreduce_parameters=None,
              base_path=None,
              queue_name=None,
              eta=None,
              countdown=None,
              hooks_class_name=None,
              _app=None,
              in_xg_transaction=False):
    """Start a new, mapper-only mapreduce.

  Deprecated! Use map_job.start instead.

  If a value can be specified both from an explicit argument and from
  a dictionary, the value from the explicit argument wins.

  Args:
    name: mapreduce name. Used only for display purposes.
    handler_spec: fully qualified name of mapper handler function/class to call.
    reader_spec: fully qualified name of mapper reader to use
    mapper_parameters: dictionary of parameters to pass to mapper. These are
      mapper-specific and also used for reader/writer initialization.
      Should have format {"input_reader": {}, "output_writer":{}}. Old
      deprecated style does not have sub dictionaries.
    shard_count: number of shards to create.
    mapreduce_parameters: dictionary of mapreduce parameters relevant to the
      whole job.
    base_path: base path of mapreduce library handler specified in app.yaml.
      "/mapreduce" by default.
    queue_name: taskqueue queue name to be used for mapreduce tasks.
      see util.get_queue_name.
    eta: absolute time when the MR should execute. May not be specified
      if 'countdown' is also supplied. This may be timezone-aware or
      timezone-naive.
    countdown: time in seconds into the future that this MR should execute.
      Defaults to zero.
    hooks_class_name: fully qualified name of a hooks.Hooks subclass.
    in_xg_transaction: controls what transaction scope to use to start this MR
      job. If True, there has to be an already opened cross-group transaction
      scope. MR will use one entity group from it.
      If False, MR will create an independent transaction to start the job
      regardless of any existing transaction scopes.

  Returns:
    mapreduce id as string.
  """
    if shard_count is None:
        shard_count = parameters.config.SHARD_COUNT

    if mapper_parameters:
        mapper_parameters = dict(mapper_parameters)

    # Make sure this old API fill all parameters with default values.
    mr_params = map_job.JobConfig._get_default_mr_params()
    if mapreduce_parameters:
        mr_params.update(mapreduce_parameters)

    # Override default values if user specified them as arguments.
    if base_path:
        mr_params["base_path"] = base_path
    mr_params["queue_name"] = util.get_queue_name(queue_name)

    mapper_spec = model.MapperSpec(handler_spec,
                                   reader_spec,
                                   mapper_parameters,
                                   shard_count,
                                   output_writer_spec=output_writer_spec)

    if in_xg_transaction and not db.is_in_transaction():
        log.warning("Expects an opened xg transaction to start mapreduce "
                    "when transactional is True.")

    return handlers.StartJobHandler._start_map(
        name,
        mapper_spec,
        mr_params,
        # TODO(user): Now that "queue_name" is part of mr_params.
        # Remove all the other ways to get queue_name after one release.
        queue_name=mr_params["queue_name"],
        eta=eta,
        countdown=countdown,
        hooks_class_name=hooks_class_name,
        _app=_app,
        in_xg_transaction=in_xg_transaction)
def schedule_report_on_site_payments():
    deferred.defer(report_on_site_payments,
                   _transactional=db.is_in_transaction())
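Passing _transactional=db.is_in_transaction() enqueues the deferred task transactionally exactly when a transaction is open, so the kick happens only if that transaction commits. The same pattern as a hedged standalone sketch; store_order and _notify are hypothetical:

import logging

from google.appengine.ext import db, deferred

def _notify(order_key):  # hypothetical task body
    logging.info('order stored: %s', order_key)

def store_order(order):
    def trans():
        order.put()
        deferred.defer(_notify, order.key(),
                       _transactional=db.is_in_transaction())
    db.run_in_transaction(trans)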