def tracking_channel_languages(user):
    """Handler for listing (GET), addition (POST) and deletion (DELETE)
    of languages for a tracking channel.

    Expects a 'channel_id' parameter (JSON body or query args); POST and
    DELETE additionally require a 'language' parameter.

    :param user: the acting user; used for channel lookup and audit events
    :returns: a jsonify'ed ``{ok: ..., ...}`` response
    """
    # Prefer the JSON body; fall back to query-string arguments.
    if request.json is not None:
        data = request.json
    else:
        data = request.args
    if 'channel_id' not in data:
        return jsonify(ok=False, error='channel_id should be provided')
    try:
        channel = Channel.objects.get_by_user(user, data['channel_id'])
    except Channel.DoesNotExist:
        return jsonify(ok=False,
                       error='Channel %s does not exist' % data['channel_id'])
    # Snapshot the current languages with list() so the audit "before" state
    # is not affected if set_allowed_langs/remove_langs mutate channel.langs
    # in place.
    old_data = {"languages": list(channel.langs)}

    if request.method == 'GET':
        from solariat.utils.lang.support import lang_to_ui
        return jsonify(ok=True, item=map(lang_to_ui, channel.langs))

    if request.method == 'POST':
        if 'language' not in data:
            return jsonify(ok=False, error='language should be provided')
        if not isinstance(channel, ServiceChannel):
            return jsonify(ok=False, error="Incorrect channel type")
        channel.set_allowed_langs([get_lang_code(data['language'])])
        new_data = {"languages": channel.langs}
        AccountEvent.create_by_user(user=user,
                                    change='Languages modifications',
                                    old_data=old_data,
                                    new_data=new_data)
        return jsonify(ok=True)

    if request.method == 'DELETE':
        if 'language' not in data:
            return jsonify(ok=False, error='language should be provided')
        if not isinstance(channel, ServiceChannel):
            return jsonify(ok=False, error="Incorrect channel type")
        channel.remove_langs([get_lang_code(data['language'])])
        # Bug fix: the DELETE branch previously recorded
        # old_data={'langs': {"languages": [...]}} and new_data={'langs': [...]},
        # which was double-nested and inconsistent with the POST branch.
        # Record the same {"languages": [...]} structure for both operations.
        new_data = {"languages": channel.langs}
        AccountEvent.create_by_user(user=user,
                                    change='Languages modifications',
                                    old_data=old_data,
                                    new_data=new_data)
        return jsonify(ok=True)

    return jsonify(ok=True)
def set_languages_param(r, default=None):
    """Normalize the 'languages' entry of a request-parameters dict.

    A non-empty UI list is converted to language codes; an empty list is
    replaced by *default* (when given) or by the channel's own languages.

    :param r: mutated request parameters dict
    :param default: optional fallback language list used when the UI
        sends an empty list
    """
    lang_key = 'languages'
    if lang_key not in r:
        return
    ui_langs = r[lang_key]
    if ui_langs:
        # Normalize whatever the UI sent into canonical language codes.
        r[lang_key] = [get_lang_code(l) for l in ui_langs]
        return
    channel = r['channel']
    if default is not None:
        r[lang_key] = default
    elif hasattr(channel, 'langs'):
        r[lang_key] = channel.langs
def parse_language(language):
    """Normalize *language* into a Language instance.

    Accepts an existing Language, a language code/name or numeric id
    (confidence assumed 1.0), or a datasift-bot dict such as
    ``{"tag": "en", "confidence": 0.80}``.

    :raises TypeError: for any other input type/shape
    """
    UNDEFINED = 'und'
    if isinstance(language, Language):
        lang = language
    elif isinstance(language, (basestring, int)):
        # assume confidence=1.0 and lang is language code or int id
        lang = Language([get_lang_code(language), 1.0])
    elif (isinstance(language, dict)
            and 'tag' in language and 'confidence' in language):
        # datasift bot format, e.g. {"tag": "en", "confidence": 0.80}
        lang = Language([language['tag'], language['confidence']])
    else:
        raise TypeError(u"create_post: unexpected type %s "
                        u"for lang keyword argument '%s' " % (
                            type(language), language))
    assert lang.lang in LANG_MAP or lang.lang == UNDEFINED, \
        u"Unexpected language: %s" % lang.lang
    return lang
def compute_csdl(self):
    """Compute and compile the datasift CSDL for this subscription/channel.

    :returns: the compiled hash of the CSDL as returned by datasift
    """
    channels = [self.channel.inbound_channel, self.channel.outbound_channel]
    csdl_data = get_csdl_data(channels)
    lang_code = get_lang_code(self.language)
    # Drop languages datasift does not know about so the CSDL is generated
    # without a language filter.
    if lang_code not in LANG_MAP:
        lang_code = None
    csdl_string = generate_csdl(*csdl_data, language=lang_code)
    datasift_response = datasift_compile(csdl_string)
    LOGGER.info(u"%s.compute_csdl: %s" % (
        self.__class__.__name__,
        log_csdl(csdl_data, csdl_string, datasift_response)))
    return datasift_response
def get_all_languages(user):
    """Return the list of supported languages.

    With ``languageSet=twitter`` returns twitter-supported languages
    (cached for one week, refreshable via ``forceFetch``); otherwise
    returns all currently detected languages.
    """
    request_data = _get_request_data()
    language_set = request_data.get('languageSet', 'all')
    force_fetch = request_data.get('forceFetch', False)

    if language_set != 'twitter':
        # list all currently supported langs
        return jsonify(ok=True, list=get_all_detected_langs())

    twitter_langs_key = '/languages/all/json?languageSet=twitter'
    from solariat_bottle.utils.cache import MongoDBCache
    cache = MongoDBCache()
    langs = cache.get(twitter_langs_key)
    if langs is None or force_fetch:
        # Deduplicate by language code, map to UI entries, sort by title.
        codes = set([get_lang_code(lang['code'])
                     for lang in get_twitter_supported_languages()])
        langs = sorted(map(lang_to_ui, codes),
                       key=operator.itemgetter('title'))
        one_week = 60 * 60 * 24 * 7
        cache.set(twitter_langs_key, langs, timeout=one_week)
    return jsonify(ok=True, list=langs)
'top-topics', # extended reports 'topics', # analytics plots } is_plot_by = lambda v: v in {'time', 'distribution'} is_sort_by = lambda v: v in {'time', 'confidence'} is_topic_plot = lambda v: v in { 'sentiment', 'top-topics', 'topics', 'missed-posts' } is_partial_trend = lambda v: v in { 'response-time', 'response-volume', 'inbound-volume' } is_problem = lambda v: v in {'top-topics'} is_cloud_type = lambda v: v in {'none', 'delta', 'percent'} is_cloud_type_or_none = lambda v: v is None or is_cloud_type(v) is_language_code = lambda v: get_lang_code( v) in LANG_MAP # allows lang code or name ("en"|"English") is_labeling_strategy = lambda v: v in {'default', 'channel', 'event', None} all_p_statuses = lambda l: all(map(is_p_status, l or [])) all_intentions = lambda l: all(map(is_intention, l or [])) all_topic_descs = lambda l: all(map(is_topic_desc, l or [])) all_languages = lambda l: all(map(is_language_code, l or [])) ALL_TOPICS_desc = dict(topic=ALL_TOPICS, topic_type='node') default_statuses_by_plot_type = { 'response-time': [SpeechActMap.ACTUAL], 'response-volume': [SpeechActMap.ACTUAL], 'sentiment': [SpeechActMap.ACTIONABLE, SpeechActMap.ACTUAL], 'missed-posts': [SpeechActMap.ACTIONABLE], 'inbound-volume': [SpeechActMap.ACTIONABLE, SpeechActMap.ACTUAL],
def make_lang_features(languages):
    """Build (lang_id, lang_code) feature pairs for each given language.

    :param languages: iterable of language ids (may be None)
    :returns: list of (lang_id, code) tuples; empty for falsy input
    """
    features = []
    for lang_id in (languages or []):
        features.append((lang_id, get_lang_code(lang_id)))
    return features
def __str__(self):
    """Human-readable summary of this object for logging/debugging."""
    # Translate the intention id to its name; fall back to the raw value.
    intention_name = SATYPE_ID_TO_NAME_MAP.get(str(self.intention),
                                               self.intention)
    return "%s(agent=%s, is_leaf=%-5s, intention='%s', language='%s', topic_count=%d)" % (
        self.__class__.__name__,
        self.agent,
        bool(self.is_leaf),
        intention_name,
        get_lang_code(self.language),
        self.topic_count)