Example #1
    def _get_case_payload_batched(self, response, user, last_sync, synclog):
        synclog.save(**get_safe_write_kwargs())

        self.num_batches = 0
        sync_operation = BatchedCaseSyncOperation(user, last_sync)
        for batch in sync_operation.batches():
            self.num_batches += 1
            logger.debug(batch)

            # case blocks
            case_xml_elements = (
                xml.get_case_element(op.case, op.required_updates, self.version)
                for op in batch.case_updates_to_sync()
            )
            for case_elem in case_xml_elements:
                response.append(case_elem)

        sync_state = sync_operation.global_state
        synclog.cases_on_phone = sync_state.actual_owned_cases
        synclog.dependent_cases_on_phone = sync_state.actual_extended_cases
        synclog.save(**get_safe_write_kwargs())

        add_custom_parameter('restore_total_cases', len(sync_state.actual_relevant_cases))
        add_custom_parameter('restore_synced_cases', len(sync_state.all_synced_cases))

        # commtrack balance sections
        commtrack_elements = self.get_stock_payload(sync_state.all_synced_cases)
        for ct_elem in commtrack_elements:
            response.append(ct_elem)

        return response
Example #2
    def _get_case_payload(self, response, user, last_sync, synclog):
        sync_operation = user.get_case_updates(last_sync)
        synclog.cases_on_phone = [
            CaseState.from_case(c) for c in sync_operation.actual_owned_cases
        ]
        synclog.dependent_cases_on_phone = [
            CaseState.from_case(c) for c in sync_operation.actual_extended_cases
        ]
        synclog.save(**get_safe_write_kwargs())

        # case blocks
        case_xml_elements = (
            xml.get_case_element(op.case, op.required_updates, self.version)
            for op in sync_operation.actual_cases_to_sync
        )
        for case_elem in case_xml_elements:
            response.append(case_elem)

        add_custom_parameter('restore_total_cases', len(sync_operation.all_potential_cases))
        add_custom_parameter('restore_synced_cases', len(sync_operation.actual_cases_to_sync))

        # commtrack balance sections
        case_state_list = [CaseState.from_case(op.case) for op in sync_operation.actual_cases_to_sync]
        commtrack_elements = self.get_stock_payload(case_state_list)
        for ct_elem in commtrack_elements:
            response.append(ct_elem)

        return response
Example #3
    def _function3():
        add_custom_parameter('txn-key-1', 1)

        try:
            raise NotImplementedError('This is a test error and can be ignored.')
        except NotImplementedError:
            record_exception(params={'err-key-2': 2, 'err-key-3': 3.0})

        raise RuntimeError('This is a test error and can be ignored.')
Example #5
def lambda_handler(event, context):
    # At this point, we're handling an invocation. Cold start is over; this code runs for each invocation.

    # This is an example of a custom event. `FROM MyPythonEvent SELECT *` in New Relic will find this event.
    agent.record_custom_event("MyPythonEvent", {"zip": "zap"})
    # This attribute gets added to the normal AwsLambdaInvocation event
    agent.add_custom_parameter("customAttribute", "customAttributeValue")

    # As normal, anything you write to stdout ends up in CloudWatch
    print("Hello, world")

    return "Success!"
Example #6
def setup_user_by_token():
    user_token = request.headers.get('X-Wigo-User-Key')
    if user_token:
        try:
            user_id = get_user_id_for_key(user_token)

            # the current user should always get a fresh copy of themself
            model_cache.invalidate(user_id)

            user = User.find(user_id)
            g.user = user

            group = getattr(g, 'group', None)

            if group and not user.location_locked:
                # if a group was passed in via geo, switch the users group
                user.group_id = group.id
            elif user.group_id:
                # if the user has a group defined, use it
                group = Group.find(user.group_id)
                g.group = group
            else:
                # we need a group, so default to boston
                group = Group.find(code='boston')
                user.group_id = group.id
                g.group = group

            if not user.location_locked and hasattr(g, 'latitude') and hasattr(
                    g, 'longitude'):
                if user.modified <= (datetime.utcnow() -
                                     timedelta(minutes=30)):
                    user.latitude = round(g.latitude, 3)
                    user.longitude = round(g.longitude, 3)

            platform = request.headers.get('X-Wigo-Device')
            if not platform:
                platform = request.user_agent.platform
            if platform:
                platform = platform.lower()

            if platform in ('android', 'iphone', 'ipad'):
                user.set_custom_property('platforms', [platform])

            if user.is_changed():
                user.save()

            agent.add_custom_parameter('user_id', user.id)
            if user.group_id:
                agent.add_custom_parameter('group_code', group.code)

        except DoesNotExist:
            pass
Example #7
    def get_payload(self):
        user = self.user
        last_sync = self.sync_log

        self.validate()

        cached_payload = self.get_cached_payload()
        if cached_payload:
            return cached_payload

        start_time = datetime.utcnow()
        last_seq = str(get_db().info()["update_seq"])

        # create a sync log for this
        previous_log_id = last_sync.get_id if last_sync else None
        synclog = SyncLog(
            user_id=user.user_id,
            last_seq=last_seq,
            owner_ids_on_phone=user.get_owner_ids(),
            date=datetime.utcnow(),
            previous_log_id=previous_log_id
        )
        synclog.save(**get_safe_write_kwargs())

        # start with standard response
        batch_enabled = BATCHED_RESTORE.enabled(self.user.domain) or BATCHED_RESTORE.enabled(self.user.username)
        logger.debug('Batch restore enabled: %s', batch_enabled)
        if batch_enabled:
            response = StringRestoreResponse(user.username, items=self.items)
        else:
            response = EtreeRestoreResponse(user.username, items=self.items)

        # add sync token info
        response.append(xml.get_sync_element(synclog.get_id))
        # registration block
        response.append(xml.get_registration_element(user))

        # fixture block
        for fixture in generator.get_fixtures(user, self.version, last_sync):
            response.append(fixture)

        payload_fn = self._get_case_payload_batched if batch_enabled else self._get_case_payload
        response = payload_fn(response, user, last_sync, synclog)

        resp = str(response)
        duration = datetime.utcnow() - start_time
        synclog.duration = duration.seconds
        synclog.save()
        add_custom_parameter('restore_response_size', response.num_items)
        self.set_cached_payload_if_necessary(resp, duration)
        return resp
Example #8
def add_papertrail_new_relic_custom_parameter():
    current_time = str(time.time())
    hostname = socket.gethostname()
    log_url = "https://papertrailapp.com/systems/%s/events?time=%s" % (hostname, current_time)
    agent.add_custom_parameter("log_url", log_url)

    return True


# print(add_papertrail_new_relic_custom_parameter())

# The method calls add_custom_parameters, which is part of New Relic's Ruby Agent. The method returns true because it is meant to be called as a controller callback.
#
# Second, ensure that the new method is called during each request (or those which you would like to be linked). Add a before_filter to the top of the ApplicationController class definition, like this:
#
# before_filter :add_papertrail_new_relic_custom_parameter
# Deploy this code change a
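# A rough Python analogue of that Rails before_filter (a sketch; the Flask app
# and hook name below are assumptions, not part of the original example):

from flask import Flask

app = Flask(__name__)


@app.before_request
def attach_papertrail_log_url():
    # Runs once per request so every transaction carries the log_url attribute.
    add_papertrail_new_relic_custom_parameter()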
Example #9
def wrapper_register_ip(wrapped, instance, args, kwargs):
    def _bind_params(domain, hostname, ipaddr):
        return domain, hostname, ipaddr

    domain, hostname, ipaddr = _bind_params(*args, **kwargs)

    add_custom_parameter('domain', domain)
    add_custom_parameter('hostname', hostname)
    add_custom_parameter('ipaddr', ipaddr)

    return wrapped(*args, **kwargs)
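
The wrapper above uses the wrapt-style (wrapped, instance, args, kwargs) signature, so it can be attached with the agent's wrap_function_wrapper helper. A minimal sketch, assuming a hypothetical dns_service.client module exposing DNSClient.register_ip:

import newrelic.agent

# Report each register_ip call's arguments as custom transaction parameters.
# The module path and method name here are assumptions for illustration.
newrelic.agent.wrap_function_wrapper(
    'dns_service.client', 'DNSClient.register_ip', wrapper_register_ip)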
Example #10
def record_custom_parameter(name, value):
    """
    add a custom parameter to the current request
    """
    if agent:
        agent.add_custom_parameter(name, value)
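
A possible call site (a sketch, not from the original; the key and value are illustrative):

# Tag the current transaction with the authenticated user's id, if an agent is running.
record_custom_parameter('user_id', 1234)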
Example #11
    def _function3():
        add_custom_parameter('txn-key-1', 1)

        _function4()

        raise RuntimeError('This is a test error and can be ignored.')
Example #12
def event_related_change(group_id, event_id, is_global=False, deleted=False):
    from server.db import redis

    lock = redis.lock(
        'locks:group_event_change:{}:{}'.format(group_id, event_id),
        timeout=120)
    if lock.acquire(blocking=False):
        try:
            agent.add_custom_parameter('group_id', group_id)
            logger.debug('recording event change in group {}'.format(group_id))

            if not deleted:
                try:
                    event = Event.find(event_id)
                    event.deleted = False
                except DoesNotExist:
                    event = Event({'id': event_id, 'group_id': group_id})
                    event.deleted = True
            else:
                event = Event({'id': event_id, 'group_id': group_id})
                event.deleted = True

            group = Group.find(group_id)

            with wigo_db.transaction(commit_on_select=False):
                # add to the time in case other changes come in while this lock is taken,
                # or in case the job queues get backed up
                group.track_meta('last_event_change',
                                 time() + EVENT_CHANGE_TIME_BUFFER)

                if is_global or event.is_global:
                    groups_to_add_to = get_all_groups()
                else:
                    radius = 100
                    population = group.population or 50000
                    if population < 60000:
                        radius = 40
                    elif population < 100000:
                        radius = 60

                    groups_to_add_to = get_close_groups(
                        group.latitude, group.longitude, radius)

                num_visited = 0
                for group_to_add_to in groups_to_add_to:
                    if group_to_add_to.id == group.id:
                        continue

                    # index this event into the close group
                    if event.deleted is False:
                        event.update_global_events(group=group_to_add_to)
                    else:
                        event.remove_index(group=group_to_add_to)

                    # track the change for the group
                    group_to_add_to.track_meta(
                        'last_event_change',
                        time() + EVENT_CHANGE_TIME_BUFFER)

                    num_visited += 1

                    if (num_visited % 25) == 0:
                        lock.extend(30)

        finally:
            lock.release()