Example #1
File: jobs.py Project: princez1214/flask
def gen_zeroes_timeline(from_date, to_date, level='hour'):
    from_date = from_date.replace(minute=0, second=0, microsecond=0)
    to_date = to_date.replace(minute=0, second=0, microsecond=0)
    if level != 'hour':
        from_date = from_date.replace(hour=0)
        to_date = to_date.replace(hour=0)
    from_date = datetime_to_timestamp_ms(from_date)
    to_date = datetime_to_timestamp_ms(to_date)
    while from_date <= to_date:
        yield [from_date, 0]
        if level == 'hour':
            from_date += 60 * 60 * 1000  # 1 hour in ms
        else:
            from_date += 24 * 60 * 60 * 1000  # 1 day in ms
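A minimal usage sketch follows. The real datetime_to_timestamp_ms lives in solariat.utils.timeslot (see the imports in later examples); a stand-in is defined here so the snippet runs on its own:

import calendar
from datetime import datetime

def datetime_to_timestamp_ms(dt):
    # stand-in: naive UTC datetime -> epoch milliseconds
    return calendar.timegm(dt.timetuple()) * 1000

for point in gen_zeroes_timeline(datetime(2014, 7, 8, 10, 30),
                                 datetime(2014, 7, 8, 13, 45)):
    print point  # [1404813600000, 0], [1404817200000, 0], ...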
Example #2
    def test_post_sort_order(self):
        """ Test the regression where user profile posts were not shown
        in most recent first order. Also make sure the ordering is preserved
        across multiple conversations. """
        past_created = now() - timedelta(minutes=7*24*60)  # 7 days ago

        for idx in xrange(3):
            created = past_created + timedelta(minutes=idx)
            self._create_db_post(_created=created,
                                 content='I need a laptop bag' + str(idx),
                                 user_profile=self.up)

        conv = Conversation.objects()[:][0]
        conv.is_closed = True
        conv.save(is_safe=True)

        for idx in xrange(3):
            created = past_created + timedelta(minutes=3+idx)
            self._create_db_post(_created=created,
                                 content='I need a laptop bag' + str(idx),
                                 user_profile=self.up)

        data = dict(channel_id=str(self.inbound.id), user_name='joe')
        resp = self.client.get('/user_profile/json?%s' % urlencode(data))
        self.assertEqual(resp.status_code, 200)
        u_p_data = json.loads(resp.data)['list']
        last_post_date = datetime_to_timestamp_ms(now())
        for conv_data in u_p_data:
            for post in conv_data:
                self.assertTrue(post['created_at'] <= last_post_date)
                last_post_date = post['created_at']
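The closing loop asserts the ordering invariant: created_at values across all returned conversations must be non-increasing (most recent first). The same check in isolation, on illustrative data:

# Standalone illustration of the ordering check above
timestamps = [50, 40, 40, 10]  # ms timestamps, newest first
assert all(a >= b for a, b in zip(timestamps, timestamps[1:]))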
Example #3
    def get_timeslot_index(self, item):
        # Advance past slots that start before the item's creation time;
        # return the start of the first slot at/after it, as epoch ms.
        for idx in xrange(self.NUM_TIMERANGE_SLOTS):
            if hasattr(item, 'created_at') and utc(item.created_at) > self._cached_from_date + timedelta(hours=self.time_increment * idx):
                continue
            else:
                break
        return datetime_to_timestamp_ms(self._cached_from_date + timedelta(hours=self.time_increment * idx))
Example #4
def insights(user):
    '''
    End point for launching analysis. Will create an InsightsAnalysis
    object on the fly and return an id.
    '''
    account_id = user.account.id
    application = user.account.selected_app
    if request.method == 'POST':
        data = request.json

        if 'account_id' not in data:
            data['account_id'] = account_id
        try:
            if data['metric_values'] == ['False'] or data['metric_values'] == ['True']:
                data['metric_values'] = ['true', 'false']
            data['created_at'] = datetime_to_timestamp_ms(datetime.now())
            data['user'] = user.id
            if not isinstance(data['metric_values'], list):
                return jsonify(ok=False, error="Expected metric values as a list")
            if not data['metric_values']:
                return jsonify(ok=False,
                               error='Missing required parameter metric values. Got value: %s' % data['metric_values'])
            if type(data['metric_values'][0]) not in (str, unicode):
                data['metric_values'] = [json.dumps(metric) for metric in data['metric_values']]

            analysis = InsightsAnalysis.objects.create(**data)
            analysis.start()
            return jsonify(ok=True, item=analysis.to_dict())
        except Exception, ex:
            import traceback
            traceback.print_exc()
            return jsonify(ok=False, error=str(ex))
Example #5
    def fill_multi_series_with_zeroes(self,
                                      multi_series,
                                      from_date=None,
                                      to_date=None):
        """
        Fills missing timestamp data in multi-series data,
        so that all series contain same timestamp data
        """
        if from_date and to_date:
            all_timestamps = []
            start_date = from_date
            while start_date <= to_date:
                all_timestamps.append(datetime_to_timestamp_ms(start_date))
                start_date = start_date + timedelta(hours=24)
        else:
            all_timestamps = set()
            for series in multi_series:
                all_timestamps.update(each[0] for each in series['data'])

            all_timestamps = sorted(all_timestamps)

        for series in multi_series:
            series_timestamps = [each[0] for each in series['data']]
            for i, timestamp in enumerate(all_timestamps):
                if timestamp not in series_timestamps:
                    series['data'].insert(i, [timestamp, 0])
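A hand-worked example of the zero-filling (illustrative data; with no date range given, the union of timestamps across all series is used):

multi_series = [
    {'label': 'clicks', 'data': [[1000, 3], [3000, 7]]},
    {'label': 'views', 'data': [[2000, 5]]},
]
# After fill_multi_series_with_zeroes(multi_series):
#   clicks -> [[1000, 3], [2000, 0], [3000, 7]]
#   views  -> [[1000, 0], [2000, 5], [3000, 0]]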
Example #6
    def get_timestamp(self, mongo_group_entry):
        date = datetime(year=mongo_group_entry['_id']['year'], month=1, day=1)
        date = date + timedelta(days=mongo_group_entry['_id']['day'] - 1)
        if 'hour' in mongo_group_entry['_id']:
            date = date + timedelta(hours=mongo_group_entry['_id']['hour'])
        timestamp = datetime_to_timestamp_ms(date)
        return timestamp
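For concreteness, a worked example (reading the group key layout off the code: 'day' is a 1-based day-of-year, 'hour' is optional):

entry = {'_id': {'year': 2014, 'day': 39, 'hour': 5}}
# datetime(2014, 1, 1) + timedelta(days=38) + timedelta(hours=5)
#   -> datetime(2014, 2, 8, 5, 0), returned as epoch milliseconds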
Example #7
    def import_data(self, user, data_loader, *args, **kwargs):
        if not self.channel_id:
            raise ImproperStateError('event_type.channel_id must be set before importing data')

        self.import_id = datetime_to_timestamp_ms(now())
        self.before_import_status = self.sync_status
        self.user = user
        return super(EventType, self).import_data(user, data_loader)
Example #8
    def to_json(self, fields_to_show=None):
        json_data = super(LinUCBPredictorModel,
                          self).to_json(fields_to_show=fields_to_show)
        json_data.pop('packed_clf', None)
        json_data['status'] = self.state.status
        json_data['state'] = self.state.state
        json_data['last_run'] = self.last_run and datetime_to_timestamp_ms(
            self.last_run)
        return json_data
Example #9
    def initialize_timeslot_counts(self):
        time_results = {}
        self.time_increment = (self._cached_to_date - self._cached_from_date).days * 24 / float(self.NUM_TIMERANGE_SLOTS)
        for class_idx in range(-1, self.get_num_classes()):
            time_results[class_idx] = dict()
            for slot_idx in xrange(self.NUM_TIMERANGE_SLOTS):
                timeslot = datetime_to_timestamp_ms(self._cached_from_date + timedelta(hours=self.time_increment * slot_idx))
                time_results[class_idx][timeslot] = 0
        return time_results
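The slot width is the window length in hours divided by NUM_TIMERANGE_SLOTS. With hypothetical numbers (a 7-day window and 20 slots, neither taken from the source):

time_increment = 7 * 24 / float(20)  # 8.4 hours per slot
# slot k starts at from_date + timedelta(hours=8.4 * k),
# and each start is keyed as an epoch-ms timestamp with count 0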
Example #10
            def map_fn(job):
                ts = job.created_at.replace(minute=0, second=0, microsecond=0)
                if level == 'month':
                    ts = ts.replace(hour=0)
                timestamp = datetime_to_timestamp_ms(ts)

                if plot_by == 'count':
                    return timestamp, 1
                elif plot_by == 'time':
                    return timestamp, job.wait_time, job.execution_time
Example #11
def _to_ui(item, user=None):
    return {
        'id': str(item.id),
        'perm': item.perms(user) if user else 'r',
        'title': item.title,
        'users': item.users,
        'status': str(item.status),
        'platform': 'twitter',
        'created_at': datetime_to_timestamp_ms(item.created),
    }
Example #12
def patch_created_at(data, created_at=None):
    import copy

    if not created_at:
        created_at = now()

    res = copy.deepcopy(data)
    delta = datetime_to_timestamp_ms(created_at)
    res["data"]["twitter"]["created_at"] = datetime_to_string(
        created_at)  # "Tue, 08 Jul 2014 12:24:58 +0000"
    res["data"]["twitter"]["id"] = str(
        long(res["data"]["twitter"]["id"]) + delta)
    return res
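A usage sketch with a hypothetical fixture payload (now, datetime_to_string and datetime_to_timestamp_ms are assumed to come from solariat.utils.timeslot, as in the other examples):

fixture = {"data": {"twitter": {"id": "123456789",
                                "created_at": "Tue, 08 Jul 2014 12:24:58 +0000"}}}
patched = patch_created_at(fixture)
# patched["data"]["twitter"]["created_at"] is rebased to now(), and the id
# is offset by the ms timestamp so repeated patches yield unique ids;
# the input dict itself is left untouched (deepcopy).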
Example #13
    def query_responses(post_ids, sort_by, user):
        t_start = now()
        response_ids = [ "%s:r" % post_id for post_id in post_ids ]
        response_filter = {"id__in": response_ids}
        if response_type:
            queries = []

            if 'review' in response_type:
                response_type.remove('review')
                response_type.extend(REVIEW_STATUSES)

            posted_matchable_filters = set(POSTED_MATCHABLE).intersection(set(response_type))
            response_status_filters  = set(RESPONSE_STATUSES).intersection(set(response_type))

            if posted_matchable_filters:
                queries.append({
                    #"status": "posted",
                    "posted_matchable__in": list(posted_matchable_filters)})

            if 'clicks' in response_type:
                queries.append({"clicked_count__gt": 0})

            if response_status_filters:
                queries.append({"status__in": response_type})

            if len(queries) > 1:
                response_filter.update({"$or": queries})
            elif queries:
                response_filter.update(queries[0])

        responses = list(Response.objects.find(**response_filter).limit(paging_params['limit']))

        total_count = len(responses)
        if sort_by == 'intention':
            responses.sort(key=lambda p: -p.intention_confidence)
        else:
            responses.sort(key=lambda p: p.created, reverse=True)

        app.logger.debug("Fetched %d responses in %s" % (
                total_count, now() - t_start))

        res = [
            {'match'         : matchable_to_dict(resp.matchable, resp.relevance),
             'post'          : post_to_dict(resp.post, user),
             'response_type' : resp.status,
             'clicks'        : resp.clicked_count,
             'acted_date'    : datetime_to_timestamp_ms(resp.created)}
            for resp in responses
        ]

        return res
Example #14
    def to_dict(self):
        from solariat.utils.timeslot import datetime_to_timestamp_ms
        return {'id': str(self.id),
                'created_at': datetime_to_timestamp_ms(self.created),
                'members': [str(_.id) for _ in self.members],
                'smart_tags': [str(_.id) for _ in self.smart_tags],
                'channels': [str(_.id) for _ in self.channels],
                'journey_types': [str(_.id) for _ in self.journey_types],
                'journey_tags': [str(_.id) for _ in self.journey_tags],
                'funnels': [str(_.id) for _ in self.funnels],
                'predictors': [str(_.id) for _ in self.predictors],
                'roles': self.roles,
                'name': self.name,
                'description': self.description,
                'members_total': self.members_total}
Example #15
def _matchable_to_dict(matchable, user):
    return {
        'id': str(matchable.id),
        'created_at': datetime_to_timestamp_ms(matchable.created),
        'url': matchable.get_url(),
        'creative': matchable.creative,
        #'topics'          : matchable.intention_topics,
        'channels': [str(x.id) for x in matchable.channels if x],
        'status': 'active' if matchable.is_active else 'inactive',
        #'is_dispatchable' : matchable.is_dispatchable,
        'impressions': matchable.accepted_count,
        'perm': 'rw' if matchable.can_edit(user) else 'r',
        'clicks': matchable.clicked_count,
        'language': matchable.language
    }
Example #16
    def agents(account):
        """ Return a dict representation of a default group for all agents of an account """
        from solariat.utils.timeslot import datetime_to_timestamp_ms, now
        return {'id': default_agent_group(account),
                'created_at': datetime_to_timestamp_ms(now()),
                'members': [],
                'smart_tags': [],
                'channels': [],
                'journey_types': [],
                'journey_tags': [],
                'funnels': [],
                'predictors': [],
                'roles': [AGENT],
                'name': 'All Agents of account %s' % account.name,
                'description': 'All Agents of account %s' % account.name,
                'members_total': 'N/A'}
Example #17
    def get_timeline_results(self, pipeline, params):
        computed_pipeline = self.predictors_view.compute_pipeline(
            pipeline, params)
        LOGGER.info("Executing timeline query: " + str(pipeline))

        result = self.train_class.objects.coll.aggregate(
            computed_pipeline)['result']

        helper_structure = defaultdict(list)
        for entry in result:
            date = datetime(year=entry['_id']['year'], month=1, day=1)
            date = date + timedelta(days=entry['_id']['day'] - 1)
            if 'hour' in entry['_id']:
                date = date + timedelta(hours=entry['_id']['hour'])
            timestamp = datetime_to_timestamp_ms(date)
            helper_structure[self.analysis.analyzed_metric].append(
                [timestamp, entry['reward']])
            helper_structure['Count'].append([timestamp, entry['count']])
        result = []
        for key, value in helper_structure.iteritems():
            result.append(
                dict(label=key, data=sorted(value, key=lambda x: x[0])))
        return result
Example #18
File: jobs.py Project: princez1214/flask
    def prepare_plot_result(self, params, result):
        computed_metric = params['plot_by']
        if params['plot_type'] == 'time':
            helper_structure = defaultdict(list)
            for entry in result:
                date = datetime(year=entry['_id']['year'], month=1, day=1)
                date = date + timedelta(days=entry['_id']['day'] - 1)
                if 'hour' in entry['_id']:
                    date = date + timedelta(hours=entry['_id']['hour'])
                timestamp = datetime_to_timestamp_ms(date)

                if computed_metric == 'time':
                    for each in ['Wait', 'Execution']:
                        helper_structure[each].append([timestamp, entry[each]])
                else:
                    helper_structure[computed_metric].append(
                        [timestamp, entry[computed_metric]])

            result = []
            sort_by_timestamp = lambda values: sorted(values,
                                                      key=itemgetter(0))
            for key, value in helper_structure.viewitems():
                result.append(dict(label=key, data=sort_by_timestamp(value)))
        return result
Example #19
        def to_dict(resp):
            result = dict(
                created_at = datetime_to_timestamp_ms(resp.post.created),
                stats      = dict(
                    #influence     = 0.0,
                    #receptivity   = 0.0,
                    #actionability = 0.0, #resp.post.actionability,
                    intention     = {
                        'vote'  : resp.post.get_vote(user),
                        'type'  : resp.post.intention_name,
                        'score' : "%.2f" % resp.post.intention_confidence
                    },
                ),
                text       = resp.post.plaintext_content,
                topics     = resp.post.punks,
                url        = resp.post.url,
                id_str     = str(resp.post.id)
            )

            if resp.status == 'posted':
                result['response'] = dict(
                    post_id_str = str(resp.post.id),
                    creative    = resp.matchable.creative,
                    id          = str(resp.id),
                    landing_url = resp.matchable.get_url(),
                    topics      = resp.matchable.intention_topics,
                    stats       = {
                        "ctr": resp.matchable.ctr,
                        "impressions" : resp.matchable.accepted_count,
                        "relevance" : resp.relevance
                        }
                    )
            else:
                result['response'] = None

            return result
Example #20
def _item_to_dict(t, user=None):
    return {
        'id': str(t.id),
        'channel': str(t.parent_channel),
        'created_at': datetime_to_timestamp_ms(t.created),
        'title': t.title,
        'direction': t.direction,
        'description': t.description,
        'keywords': t.keywords,
        'skip_keywords': t.skip_keywords,
        'watchwords': t.watchwords,
        'labels': [str(x) for x in t.contact_label],
        'intentions': t.intentions,
        'status': t.status,
        'groups': [str(group) for group in t.acl],
        'adaptive_learning_enabled': t.adaptive_learning_enabled,
        'perm': t.perms(user) if user else 'r',
        'alert': {
            'is_active': t.alert_is_active,
            'posts_limit': t.alert_posts_limit,
            'posts_count': t.alert_posts_count,
            'emails': t.alert_emails
        },
    }
Example #21
from flask import request, jsonify

from solariat_bottle.db.user import User, ValidationToken
#from solariat_bottle.db.trials import Trial
from solariat_bottle.db.roles import ADMIN, AGENT, ANALYST
from solariat_bottle.app import app, logger, AppException
from solariat_bottle.db.account import Account

from solariat_bottle.utils.decorators import login_required
from solariat_bottle.utils.mailer import send_invitation_email
from solariat.utils.timeslot import datetime_to_timestamp_ms, parse_datetime, now, Timeslot, UNIX_EPOCH

from solariat_bottle.db.account import account_stats
from solariat_bottle.views.account import _create_trial

js_ts = lambda dt: dt and datetime_to_timestamp_ms(dt) or None  # None-safe ms conversion

ERROR_DUP_USER = "******"
ERROR_DATE_RANGE = "End date should be greater than start date"


def account_name_from_email(email):
    account_name = email.replace('@', '_at_')
    account_name = account_name.replace('.', '_dot_')
    return account_name


def split_full_name(full_name):
    names = full_name.split(' ')
    if len(names) == 2:
        first_name = names[0]
Example #22
    def test_account_create_and_update(self):
        '''Test the various /configure/ endpoints for accounts'''

        # Create new accounts one an internal account and another a paid package account
        params = {
            'name': 'TestAcct',
            'account_type': 'Native',
            'package': 'Gold'
        }
        self.user.update(user_roles=[])
        resp = self._call_api('/accounts/json', assert_false=True, **params)

        # Log in as staff user and try again
        self.login(user=self.su)
        resp = self._call_api('/accounts/json', **params)

        self.assertTrue('account' in resp)
        account = resp['account']
        account_id = account['id']

        self.assertEqual(account['account_type'], 'Native')
        self.assertEqual(account['package'], 'Gold')

        # Update the account's pay package
        params = {
            'accountId': account_id,
            'accountName': 'TestAcct',
            'accountType': 'Native',
            'pricingPackage': 'Bronze'
        }
        resp = self._call_api('/configure/account_update/json', **params)

        self.assertTrue('data' in resp)
        self.assertEqual(resp['data'], {})

        # Test with a specified end_date parameter
        tomorrow = dt.date.today() + dt.timedelta(days=1)
        tomorrow_fmt = tomorrow.strftime('%m/%d/%Y')
        params = {
            'name': 'TestExpiringAcct',
            'package': 'Bronze',
            'end_date': tomorrow_fmt
        }

        resp = self._call_api('/accounts/json', **params)
        self.assertEqual(resp['account']['end_date'],
                         datetime_to_timestamp_ms(tomorrow))

        # Test GET
        params = {'accountId': account_id}

        resp = self._call_api(
            '/configure/account_update/json?accountId={}'.format(account_id),
            method="GET")
        self.assertTrue('data' in resp)

        self.assertEqual(resp['data']['accountType'], 'Native')
        self.assertEqual(resp['data']['pricingPackage'], 'Bronze')

        # Update via the PUT request
        note = "Test Note\nSecond Line"
        params = {
            'id': account_id,
            'name': 'TestAcct',
            'account_type': 'Native',
            'package': 'Gold',
            'notes': note
        }
        resp = self._call_api('/accounts/json', method="PUT", **params)

        self.assertTrue('account' in resp)
        account = resp['account']
        self.assertEqual(account['package'], 'Gold')
        self.assertEqual(account['notes'], note)

        params = {"name": "TestSkunk", "account_type": "Skunkworks"}
        resp = self._call_api('/accounts/json', **params)
        self.assertTrue('account' in resp)
        account = resp['account']
        self.assertEqual(account['account_type'], "Skunkworks")
        self.assertEqual(account['package'], 'Internal')

        #  Account delete
        #  Switch user account
        self.su.current_account = self.accounts[0]
        self.su.save()

        resp = self._call_api('/accounts/json?id={}'.format(account_id),
                              method="DELETE")

        resp = self._call_api('/account/json?name=TestAcct', method="GET")
        # If account is not present, return the current account
        self.assertEqual(resp['account']['name'], 'Acct1')
Example #23
def _json_account(acct,
                  user=None,
                  with_stats=False,
                  start_date=None,
                  end_date=None,
                  cache=None):
    if cache is None:
        cache = {}

    if acct:
        if (session.get('sf_oauthToken', False)
                and acct.access_token is not None
                and acct.account_type == 'Salesforce'):
            is_sf_auth = True
        else:
            is_sf_auth = False

        package = "Internal"
        if acct.package is not None:
            package = acct.package.name

        csm = acct.customer_success_manager
        if csm is not None:
            csm = csm.email

        adm = {'first': None, 'last': None, 'email': None}
        try:
            #  Note: Taking the first element might not be best
            if 'admins' in cache:
                admins = cache['admins']
            else:
                admins = [
                    admin_user for admin_user in acct.admins
                    if not admin_user.is_staff
                ]
            admin = admins[0]
        except IndexError:  # only staff are admins
            pass
        else:
            adm['first'] = admin.first_name
            adm['last'] = admin.last_name
            adm['email'] = admin.email

        a_dict = {
            'id': str(acct.id),
            'name': acct.name,
            'channels_count': acct.get_current_channels(
                status__ne='Archived').count(),
            'account_type': acct.account_type,
            'package': package,
            'created_at': datetime_to_timestamp_ms(acct.created),
            'is_current': (user and user.current_account
                           and user.current_account.id == acct.id),
            'is_admin': user and (acct.can_edit(user) or user.is_superuser),
            'is_super': user and user.is_superuser,
            'is_staff': user and user.is_staff,
            'is_analyst': user and user.is_analyst,
            'is_only_agent': user and user.is_only_agent,
            'signature': user and user.signature_suffix,
            'is_sf_auth': is_sf_auth,
            'end_date': acct.end_date and datetime_to_timestamp_ms(acct.end_date),
            'configured_apps': acct.available_apps.keys(),
            'available_apps': CONFIGURABLE_APPS.keys(),
            'selected_app': acct.selected_app,
            'customer_success_manager': csm,
            'notes': acct.notes,
            'admin': adm,
            'is_active': acct.is_active,
            'status': acct.status,
            'monthly_volume': 0,
            'is_locked': acct.is_locked,
            'updated_at': (datetime_to_timestamp_ms(acct.updated_at)
                           if acct.updated_at else None),
            'recovery_days': acct.recovery_days,
            'event_processing_lock': acct.event_processing_lock,
        }

        if 'users_count' in cache:
            a_dict['users_count'] = cache['users_count']
        else:
            a_dict['users_count'] = len(
                [u for u in acct.get_users() if not u.is_system])

        if 'all_users_count' in cache:
            a_dict['all_users_count'] = cache['all_users_count']
        else:
            a_dict['all_users_count'] = len(
                [u for u in acct.get_all_users() if not u.is_system])

        if user and user.is_admin:
            a_dict['gse_api_key'] = acct.gse_api_key

        if with_stats and user:
            a_dict['stats'] = account_stats(acct, user, start_date, end_date)
            today = dt.datetime.now()
            start_of_month = dt.datetime(today.year, today.month, 1)
            a_dict['monthly_volume'] = account_stats(acct, user,
                                                     start_of_month, today)
            today_start, today_end = Timeslot(level='day').interval
            a_dict['daily_volume'] = account_stats(acct, user, today_start,
                                                   today_end)
            a_dict[
                'daily_volume_notification_emails'] = acct.daily_post_volume_notification.alert_emails
        return a_dict
    return None
Example #24
def post_to_dict_fast(post, user, channel, service_channel, outbound_channel,
                      user_profile, profile_cache, tags, has_conversation,
                      conversation_external_id):
    """ Converts a <Post> instance into a dict of defined structure.

        profile_cache - is an optional optimization, a caller may pass
                        a dict {<user_profile_id> : <user_profile_dict>}

        parent_channel- used to pull out smat tags per post, efficiently
    """

    #from datetime import datetime
    #start_ts = datetime.now()

    smart_tags = find_smart_tags(post, tags, user)
    #SKIP  "Y1", datetime.now() - start_ts
    intentions, topics = _from_speech_acts(post, user)

    #SKIP  "Y2", datetime.now() - start_ts
    serv_history = out_history = False
    if outbound_channel:
        out_history = user_profile.has_history(outbound_channel)
    if service_channel:
        serv_history = user_profile.has_history(service_channel)
    has_history = out_history or serv_history

    # NOTE: This is expensive and should be optimized
    res = {
        "id_str": str(post.id),
        "created_at": datetime_to_timestamp_ms(post.created_at),
        "text": post.plaintext_content,
        "lang": post.language,
        "intentions": intentions,
        "topics": topics,
        # this if below is for NPS post case (Alex G.)
        "user": (profile_cache[post.user_profile_id] if post.user_profile_id
                 else profile_cache['user_profile_objects'][post.actor_id].to_dict()),
        "url": post.url,
        "url_href_text": post.view_url_link,
        #"channel_assignments" : post.channel_assignments,
        "filter_status": channel and post_assignment_to_status(post, channel) or None,
        "on_outbound_chan": not channel or not (channel.is_service or channel.is_inbound),
        "channel_id": channel and str(channel.id) or None,
        "channel_platform": channel and str(channel.platform) or None,
        "smart_tags": smart_tags,
        "has_history": has_history,
        "has_conversation": has_conversation,
        "conversation_external_id": conversation_external_id,
        #"reply_to"            : post.reply_to,
        "stats": {
            "intention": {
                "type": post.intention_name,
                "score": "%.2f" % post.intention_confidence,
                "vote": post.get_vote(user)
            },
            #"actionability" : post.actionability,
            #"influence"     : 0,
            #"receptivity"   : 0
        },
    }

    res.update(post.platform_specific_data(outbound_channel))

    #SKIP  "Y4", datetime.now() - start_ts
    return res
Example #25
def list_channels(user):
    data = request.json or {}
    account = parse_account(user, data)

    try:
        results = []
        if data.get('widget'):
            channels = filtered_channels(find_channels(user, account, status='Active'))
            for channel in channels:
                results.append({
                    "id": str(channel.id),
                    "title": channel.title})

        elif data.get('primitive'):
            channels = filtered_channels(find_channels(user, account),
                                         filter_service=True,
                                         filter_compound=True)
            for channel in channels:
                results.append({
                    "id": str(channel.id),
                    "title": channel.title,
                    "platform": channel.platform,
                    "is_dispatchable": channel.is_dispatchable})

        elif data.get('service'):
            channels = find_channels(user, account, channel_class=ServiceChannel)
            for channel in channels:
                results.append({
                    "id": str(channel.id),
                    "title": channel.title})

        else:
            channels = filtered_channels(find_channels(user, account))

            if data.get('outbound') or request.args.get('outbound'):
                channels = [ch for ch in channels if ch.is_dispatchable]

            channel_stats_map = {}

            if data.get('stats'):
                from_, to_ = parse_date_interval(data.get('from'), data.get('to'))
                level = guess_timeslot_level(from_, to_)
                from operator import itemgetter
                channel_ids = map(itemgetter('id'), channels)
                channel_stats_map = aggregate_stats(
                    user, channel_ids, from_, to_, level,
                    aggregate=(
                        'number_of_posts',
                        'number_of_false_positive',
                        'number_of_true_positive',
                        'number_of_false_negative'
                    )
                )
            for channel in channels:
                stats = channel_stats_map.get(str(channel.id))
                channel_dict = {
                    "id"                    : str(channel.id),
                    "title"                 : channel.title,
                    "type_name"             : channel.__class__.__name__,
                    "type"                  : "channel",
                    "parent"                : str(channel.parent_channel or ""),
                    "status"                : channel.status,
                    "description"           : channel.description,
                    "created_at"            : datetime_to_timestamp_ms(channel.created),
                    "platform"              : channel.platform,
                    "account"               : channel.account and channel.account.name,
                    "perm"                  : 'r' if channel.read_only else channel.perms(user),
                    "facebook_page_ids"     : channel.facebook_page_ids if hasattr(channel, 'facebook_page_ids') else None,
                    "facebook_access_token" : channel.facebook_access_token if hasattr(channel, 'facebook_access_token') else None,
                    "access_token_key"      : channel.access_token_key if hasattr(channel, 'access_token_key') else None,
                    "access_token_secret"   : channel.access_token_secret if hasattr(channel, 'access_token_secret') else None,
                    "twitter_handle"        : channel.twitter_handle if hasattr(channel, 'twitter_handle') else None,
                    "is_compound"           : channel.is_compound,
                    "stats"                 : stats}
                results.append(channel_dict)


        results.sort(key=lambda x: x['title'])
        return jsonify(ok=True, list=results)

    except Exception, exc:
        app.logger.error('error on /channels/json',
                         exc_info=True)
        return jsonify(ok=False, error=str(exc))