def _analyze_diagnostic_info(diagnostic_info, reply_info):
    '''
    Determines what response should be sent based on `diagnostic_info` content.
    Returns a list of response IDs. Returns None if no response should be sent.
    '''
    logger.debug_log('_analyze_diagnostic_info: enter')

    responses = None

    AddressParts = collections.namedtuple('AddressParts', ['name', 'plus', 'domain'])

    # Get our local email address parts
    match = _address_splitter.match(config['emailUsername'])
    local_email_parts = AddressParts(*match.groups())

    # We'll break apart the "to" address (if applicable)
    to_parts = AddressParts(None, None, None)
    if reply_info.to:
        match = _address_splitter.match(reply_info.to)
        if not match:
            # Someone is messing with us
            raise ValueError('invalid email address: to-address match failed')
        to_parts = AddressParts(*match.groups())

    # TODO: more flexible rules, not so hard-coded
    if utils.coalesce(diagnostic_info,
                      ['DiagnosticInfo', 'SystemInformation', 'isPlayStoreBuild']):
        # No download links in Play Store email
        responses = ['generic_info']
    elif to_parts.name == local_email_parts.name and \
            to_parts.domain == local_email_parts.domain:
        # E.g., [email protected], [email protected]
        responses = ['download_new_version_links',
                     # Disabling attachment responses for now. Not sure if
                     # it's a good idea. Note that it needs to be tested.
                     # 'download_new_version_attachments',
                     ]
    elif to_parts.domain == local_email_parts.domain:
        # E.g., *@psiphon.ca
        responses = ['generic_info']
    elif not reply_info.to and \
            utils.coalesce(diagnostic_info,
                           ('Metadata', 'platform'),
                           required_types=utils.string_types):
        # Windows S3 feedback.
        # BUG FIX: utils.string_types was previously passed positionally,
        # which (per the keyword signature used elsewhere in this file --
        # default_value precedes required_types) bound it to default_value,
        # making this condition truthy even when no platform value exists.
        responses = ['download_new_version_links']

    logger.debug_log('_analyze_diagnostic_info: exit: %s' % responses)
    return responses
def _get_email_reply_info(autoresponder_info):
    """Returns None if no reply info found, otherwise an instance of _ReplyInfo.
    Note that this function also validates the email address.
    """
    logger.debug_log('_get_email_reply_info: enter')

    email_info = autoresponder_info.get('email_info')
    diagnostic_info = autoresponder_info.get('diagnostic_info')
    reply_info = None

    # Prefer explicit email_info; fall back to info recorded in the
    # diagnostic package itself.
    if email_info:
        reply_info = _ReplyInfo(email_info.get('address'),
                                email_info.get('message_id'),
                                email_info.get('subject'),
                                email_info.get('to'))
    elif utils.coalesce(diagnostic_info, 'EmailInfo', required_types=dict):
        reply_info = _ReplyInfo(diagnostic_info['EmailInfo'].get('address'),
                                diagnostic_info['EmailInfo'].get('message_id'),
                                diagnostic_info['EmailInfo'].get('subject'),
                                diagnostic_info['EmailInfo'].get('to'))
    elif utils.coalesce(diagnostic_info, 'Feedback', required_types=dict):
        reply_info = _ReplyInfo(diagnostic_info['Feedback'].get('email'),
                                None, None, None)

    if not reply_info:
        logger.debug_log('_get_email_reply_info: no/bad reply_info, exiting')
        return None

    # Sometimes the recorded address looks like "<*****@*****.**>"
    if reply_info.address:
        reply_info.address = reply_info.address.strip('<>')

    if not reply_info.address:
        logger.debug_log('_get_email_reply_info: no/bad reply_info.address, exiting')
        return None

    if reply_info.to:
        reply_info.to = reply_info.to.strip('<>')

    validator = email_validator.EmailValidator(fix=True, lookup_dns='mx')
    try:
        fixed_address = validator.validate_or_raise(reply_info.address)
        reply_info.address = fixed_address
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit. Narrowed to Exception; the
        # validator's exact exception types aren't visible here --
        # presumably validation errors. TODO confirm.
        logger.debug_log('_get_email_reply_info: address validator raised, exiting')
        return None

    logger.debug_log('_get_email_reply_info: exit')
    return reply_info
def _process_diagnostic_info(diagnostic_info):
    # Persists `diagnostic_info` to the DB: one DiagnosticData row plus a row
    # (or rows) per class in _table_class_registry, each FK'd to it.

    # We are not supporting old Android diagnostic data packages
    if coalesce(diagnostic_info, ('Metadata', 'platform')) == 'android' \
            and coalesce(diagnostic_info, ('Metadata', 'version'), 0, int) < 2:
        return

    session = None
    try:
        session = _new_session()

        diagnostic_data = DiagnosticData.create(diagnostic_info)
        session.add(diagnostic_data)

        # Get the ID for the new DiagnosticData object, so we can FK it.
        session.flush()

        for table_class in _table_class_registry:
            objs = table_class.create(diagnostic_info)
            if not objs:
                continue

            # create() may return a single object or a sequence of them;
            # normalize to a tuple.
            if not isinstance(objs, (tuple, list)):
                objs = (objs,)

            for obj in objs:
                setattr(obj, 'diagnostic_data_id', diagnostic_data.id)
                session.add(obj)

        _sanitize_session(session)

        session.commit()
    except sqlalchemy.exc.IntegrityError as e:
        if session:
            session.rollback()

        # Note that MySQL discards milliseconds ("fractional seconds": http://dev.mysql.com/doc/refman/5.0/en/datetime.html).
        # This means that the "last timestamp" will actually be a smaller value
        # than the timestamp of the last-inserted record. This means that there
        # will always(?) be an attempt to re-insert the last-inserted record,
        # which will result in an exception. We need to ignore it and carry on.
        expected = e.orig.args[1].startswith('Duplicate entry') and e.orig.args[1].endswith("for key 'obj_id'")
        if not expected:
            raise
    except:
        # Bare except is deliberate here: roll the session back on *any*
        # failure (including non-Exception exits), then re-raise.
        if session:
            session.rollback()
        raise
    finally:
        if session:
            session.close()
def _analyze_diagnostic_info(diagnostic_info, reply_info):
    '''
    Determines what response should be sent based on `diagnostic_info` content.
    Returns a list of response IDs. Returns None if no response should be sent.
    '''
    logger.debug_log('_analyze_diagnostic_info: enter')

    responses = None

    AddressParts = collections.namedtuple('AddressParts', ['name', 'plus', 'domain'])

    # Get our local email address parts
    match = _address_splitter.match(config['emailUsername'])
    local_email_parts = AddressParts(*match.groups())

    # We'll break apart the "to" address (if applicable)
    to_parts = AddressParts(None, None, None)
    if reply_info.to:
        match = _address_splitter.match(reply_info.to)
        if not match:
            # Someone is messing with us
            raise ValueError('invalid email address: to-address match failed')
        to_parts = AddressParts(*match.groups())

    # TODO: more flexible rules, not so hard-coded
    if utils.coalesce(diagnostic_info,
                      ['DiagnosticInfo', 'SystemInformation', 'isPlayStoreBuild']):
        # No download links in Play Store email
        responses = ['generic_info']
    elif to_parts.name == local_email_parts.name and \
            to_parts.domain == local_email_parts.domain:
        # E.g., [email protected], [email protected]
        responses = ['download_new_version_links',
                     # Disabling attachment responses for now. Not sure if
                     # it's a good idea. Note that it needs to be tested.
                     # 'download_new_version_attachments',
                     ]
    elif to_parts.domain == local_email_parts.domain:
        # E.g., *@psiphon.ca
        responses = ['generic_info']
    elif not reply_info.to and \
            utils.coalesce(diagnostic_info,
                           ('Metadata', 'platform'),
                           required_types=utils.string_types):
        # Windows S3 feedback.
        # BUG FIX: utils.string_types was previously passed positionally,
        # which (per the keyword signature used elsewhere in this file --
        # default_value precedes required_types) bound it to default_value,
        # making this condition truthy even when no platform value exists.
        responses = ['download_new_version_links']

    logger.debug_log('_analyze_diagnostic_info: exit: %s' % responses)
    return responses
def _get_email_reply_info(autoresponder_info):
    """Returns None if no reply info found, otherwise an instance of _ReplyInfo.
    Note that this function also validates the email address.
    """
    logger.debug_log('_get_email_reply_info: enter')

    email_info = autoresponder_info.get('email_info')
    diagnostic_info = autoresponder_info.get('diagnostic_info')
    reply_info = None

    # Prefer explicit email_info; fall back to info recorded in the
    # diagnostic package itself.
    if email_info:
        reply_info = _ReplyInfo(email_info.get('address'),
                                email_info.get('message_id'),
                                email_info.get('subject'),
                                email_info.get('to'))
    elif utils.coalesce(diagnostic_info, 'EmailInfo', required_types=dict):
        reply_info = _ReplyInfo(diagnostic_info['EmailInfo'].get('address'),
                                diagnostic_info['EmailInfo'].get('message_id'),
                                diagnostic_info['EmailInfo'].get('subject'),
                                diagnostic_info['EmailInfo'].get('to'))
    elif utils.coalesce(diagnostic_info, 'Feedback', required_types=dict):
        reply_info = _ReplyInfo(diagnostic_info['Feedback'].get('email'),
                                None, None, None)

    if not reply_info:
        logger.debug_log('_get_email_reply_info: no/bad reply_info, exiting')
        return None

    # Sometimes the recorded address looks like "<*****@*****.**>"
    if reply_info.address:
        reply_info.address = reply_info.address.strip('<>')

    if not reply_info.address:
        logger.debug_log('_get_email_reply_info: no/bad reply_info.address, exiting')
        return None

    if reply_info.to:
        reply_info.to = reply_info.to.strip('<>')

    validator = email_validator.EmailValidator(fix=True, lookup_dns='mx')
    try:
        fixed_address = validator.validate_or_raise(reply_info.address)
        reply_info.address = fixed_address
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit. Narrowed to Exception; the
        # validator's exact exception types aren't visible here --
        # presumably validation errors. TODO confirm.
        logger.debug_log('_get_email_reply_info: address validator raised, exiting')
        return None

    logger.debug_log('_get_email_reply_info: exit')
    return reply_info
def _get_lang_id_from_diagnostic_info(diagnostic_info):
    '''
    Derive the language from `diagnostic_info` and return its ID/code.
    Returns `None` if the language can't be determined.
    '''
    logger.debug_log('_get_lang_id_from_diagnostic_info: enter')

    lang_id = None

    # There can be different -- and better or worse -- ways of determining the
    # user's language depending on platform, the type of feedback, and so on.
    # Each lookup below only fires if the previous ones produced nothing.

    # Windows, with feedback message
    lang_id = lang_id or utils.coalesce(
        diagnostic_info,
        ['Feedback', 'Message', 'text_lang_code'],
        required_types=utils.string_types)
    # The text-language detector can report 'INDETERMINATE'; treat that as
    # unknown and keep looking.
    if lang_id and lang_id.find('INDETERMINATE') >= 0:
        lang_id = None

    # All Windows feedback
    lang_id = lang_id or utils.coalesce(diagnostic_info, [
        'DiagnosticInfo', 'SystemInformation', 'OSInfo', 'LocaleInfo',
        'language_code'
    ], required_types=utils.string_types)

    # All Windows feedback
    lang_id = lang_id or utils.coalesce(diagnostic_info, [
        'DiagnosticInfo', 'SystemInformation', 'OSInfo', 'LanguageInfo',
        'language_code'
    ], required_types=utils.string_types)

    # Android, from email
    lang_id = lang_id or utils.coalesce(
        diagnostic_info,
        ['EmailInfo', 'body', 'text_lang_code'],
        required_types=utils.string_types)
    # Again discard detector 'INDETERMINATE' results.
    if lang_id and lang_id.find('INDETERMINATE') >= 0:
        lang_id = None

    # Android, from system language
    lang_id = lang_id or utils.coalesce(
        diagnostic_info,
        ['DiagnosticInfo', 'SystemInformation', 'language'],
        required_types=utils.string_types)

    logger.debug_log(
        '_get_lang_id_from_diagnostic_info: exiting with lang_id=%s' % lang_id)
    return lang_id
def create(cls, diagnostic_info):
    """Build a record from `diagnostic_info`, keyed by its MongoDB _id."""
    record = cls()

    mongo_id = coalesce(diagnostic_info, '_id')
    if not mongo_id:
        # This shouldn't happen, since this field is added by MongoDB
        raise ValueError('diagnostic_info missing _id')
    record.obj_id = str(mongo_id)

    record.datetime = coalesce(diagnostic_info, 'datetime')
    record.platform = coalesce(diagnostic_info, ('Metadata', 'platform'))
    record.version = coalesce(diagnostic_info, ('Metadata', 'version'))

    return record
def forward(self, x: np.ndarray, edge_index: np.ndarray):
    """
    Graph-convolution-style forward pass using scipy sparse matrices.

    Parameters
    ----------
    x : node feature matrix; shape (num_nodes, num_features) per the
        unpack of x.shape below
    edge_index : edge list as consumed by `coalesce` -- presumably
        shape (2, num_edges); confirm against caller

    Returns
    -------
    Propagated features; dense ndarray when `x` is a dense ndarray,
    otherwise the sparse result.
    """
    x_len, x_dim = x.shape

    # Project features through the layer weights in sparse form.
    z = sps.csr_matrix(x)
    z = z @ self.w

    # make sparse adjacency matrix
    adj = coalesce(edge_index, None, x_len, x_len).tocsr()

    # add self loops
    # NOTE(review): the diagonal is set to 2, not 1 -- presumably an
    # intentional self-loop weighting; confirm.
    idx = range(x_len)
    adj[idx, idx] = 2

    # compute message weights: symmetric normalization D^-1/2 A D^-1/2,
    # with degrees clipped to >= 1 to avoid division by zero.
    deg_inv_sqrt = sps.diags(
        np.array(adj.sum(axis=-1)).clip(1).reshape(-1)**-0.5)
    A = deg_inv_sqrt @ adj @ deg_inv_sqrt

    # do message passing
    z = A @ z

    # Densify the output only when the input was dense.
    if type(x) is np.ndarray:
        z = z.toarray()
    return z
def create(cls, diagnostic_info):
    """Extract SelectedRegion entries from the diagnostic history."""
    # This is for both platforms
    history = coalesce(diagnostic_info,
                       ('DiagnosticInfo', 'DiagnosticHistory'),
                       [])

    # DiagnosticHistory is a pretty free form set of data. The only type of
    # data that goes into it that we care about here are the SelectedRegion
    # entries.
    region_entries = [e for e in history
                      if coalesce(e, 'msg') == 'SelectedRegion']

    objs = []
    for entry in region_entries:
        obj = cls()
        obj.timestamp = coalesce(entry, 'timestamp')
        obj.regionCode = coalesce(entry, ('data', 'regionCode'))
        objs.append(obj)

    return objs
def create(cls, diagnostic_info):
    """Extract Windows security-product info (antispyware/antivirus/firewall).

    Returns a list of objects, or None for non-Windows diagnostics.
    """
    if coalesce(diagnostic_info, ('Metadata', 'platform')) != 'windows':
        return None

    objs = []
    for sec_name, sec_type in (('AntiSpywareInfo', 'antispyware'),
                               ('AntiVirusInfo', 'antivirus'),
                               ('FirewallInfo', 'firewall')):
        for item in coalesce(diagnostic_info,
                             ('DiagnosticInfo', 'SystemInformation',
                              'SecurityInfo', sec_name),
                             []):
            obj = cls()
            obj.sec_type = sec_type
            obj.displayName = coalesce(item, 'displayName')

            # Each item nests its version-specific fields under a key named
            # by its 'version' value; copy those fields onto the object.
            data_version = coalesce(item, 'version')
            # FIX: use .items() (works on Python 2 and 3) instead of the
            # Python-2-only .iteritems(), and a plain loop instead of a
            # side-effecting list comprehension.
            for key, val in coalesce(item, data_version, {}).items():
                setattr(obj, key, val)

            objs.append(obj)

    return objs
def _get_lang_id_from_diagnostic_info(diagnostic_info):
    '''
    Derive the language from `diagnostic_info` and return its ID/code.
    Returns `None` if the language can't be determined.
    '''
    logger.debug_log('_get_lang_id_from_diagnostic_info: enter')

    # Candidate sources in priority order. The flag marks values produced by
    # the text-language detector, which can be 'INDETERMINATE' and must then
    # be discarded.
    sources = (
        # Windows, with feedback message
        (['Feedback', 'Message', 'text_lang_code'], True),
        # All Windows feedback
        (['DiagnosticInfo', 'SystemInformation', 'OSInfo', 'LocaleInfo', 'language_code'], False),
        # All Windows feedback
        (['DiagnosticInfo', 'SystemInformation', 'OSInfo', 'LanguageInfo', 'language_code'], False),
        # Android, from email
        (['EmailInfo', 'body', 'text_lang_code'], True),
        # Android, from system language
        (['DiagnosticInfo', 'SystemInformation', 'language'], False),
    )

    lang_id = None
    for path, may_be_indeterminate in sources:
        lang_id = lang_id or utils.coalesce(diagnostic_info, path,
                                            required_types=utils.string_types)
        if may_be_indeterminate and lang_id and lang_id.find('INDETERMINATE') >= 0:
            lang_id = None

    logger.debug_log('_get_lang_id_from_diagnostic_info: exiting with lang_id=%s' % lang_id)
    return lang_id
def sort_by_weekdays_and_seasons(command: commands.Command) -> str:
    """Special sort that takes into accounts days and seasons"""
    name = command.name

    # Each candidate is only evaluated by coalesce, which swallows the
    # ValueError raised when `name` is not in the corresponding sequence.
    def weekday_rank() -> str:
        return str(utils.DAYS.index(name))

    def season_rank() -> str:
        return str(tuple(utils.SEASONS.values()).index(name))

    return utils.coalesce(weekday_rank,
                          season_rank,
                          ignored_exc=ValueError,
                          default=name)
def report_training():
    """Print the training section: entries sorted newest-first."""
    data = read_yaml('../data/training')
    print("\n\n## {}\n".format(data['about']))

    entries = data['entries']
    entries.sort(key=lambda i: i['date'], reverse=True)

    # FIX: the loop variable previously shadowed the list it iterated
    # (`for training in training:`); renamed for clarity.
    for entry in entries:
        row = {
            'type': entry['title'],
            'date': entry['date'].strftime("%Y"),
            'summary': coalesce(entry, 'subtitle', 'category'),
        }
        row['indent'] = tab_indent(len(row['type']))
        print("{type}{indent}{date}\t\n {summary}".format(**row))
def __merge_edges__(self, x, edge_index, edge_score):
    # Greedy edge contraction: repeatedly contract the highest-scoring edge
    # whose endpoints are both still unmatched, then build the coarsened
    # node features and edge index.
    nodes_remaining = set(range(x.shape[0]))

    # cluster[v] = index of the merged node that original node v maps to.
    cluster = np.zeros(x.shape[0], dtype=edge_index.dtype)
    # argsort in ascending order and reverse -> highest score first
    edge_argsort = np.argsort(edge_score)[::-1]

    # Iterate through all edges
    i = 0
    new_edge_indices = []
    for edge_idx in edge_argsort.tolist():
        source = edge_index[0, edge_idx]
        target = edge_index[1, edge_idx]
        if source in nodes_remaining and target in nodes_remaining:
            # contract the edge if it is not incident to a chosen node
            new_edge_indices.append(edge_idx)

            cluster[source] = i
            nodes_remaining.remove(source)

            # Self-loops contract to a single node, so only remove the
            # target when it differs from the source.
            if source != target:
                cluster[target] = i
                nodes_remaining.remove(target)

            i += 1
        else:
            continue

    # The remaining nodes are simply kept.
    for node_idx in nodes_remaining:
        cluster[node_idx] = i
        i += 1

    # We compute the new features as an addition of the old ones.
    new_num_nodes = np.max(cluster) + 1
    new_x = np.zeros((new_num_nodes, x.shape[1]), dtype=x.dtype)
    new_x = scatter_add(new_x, cluster, x)

    # Remap edges onto merged nodes and deduplicate via coalesce; nonzero()
    # then recovers the (2, E') index array from the sparse result.
    N = new_x.shape[0]
    new_edge_index = coalesce(cluster[edge_index], None, N, N)
    new_edge_index = np.array(new_edge_index.nonzero(), dtype=edge_index.dtype)

    return new_x, new_edge_index
def report_awards():
    """Print the awards section: entries sorted newest-first."""
    data = read_yaml('../data/awards')
    print("\n\n## {}\n".format(data['about']))

    awards = data['entries']
    awards.sort(key=lambda i: i['dated'], reverse=True)

    for award in awards:
        # `award_type` avoids shadowing the builtin `type`.
        award_type = award['type']
        if award['type'] == 'Bonus Award':
            # be more specific
            # BUG FIX: this was previously a bare `award['category']`
            # expression with no effect; per the comment, the intent was
            # apparently to display the category instead of the generic
            # "Bonus Award" label. Confirm against expected output.
            award_type = award['category']
        row = {
            'type': award_type,
            'date': award['dated'].strftime("%Y"),
            'indent': tab_indent(len(award_type), left_margin=32),
            'summary': coalesce(award, 'summary', 'team', 'category'),
        }
        print("{type} ({date}){indent}{summary}".format(**row))
def create(cls, diagnostic_info):
    """Extract Windows OS/network/user/Psiphon info into a single record.

    Returns None for non-Windows diagnostics.
    """
    if coalesce(diagnostic_info, ('Metadata', 'platform')) != 'windows':
        return None

    obj = cls()

    base = coalesce(diagnostic_info, ('DiagnosticInfo', 'SystemInformation', 'OSInfo'))
    obj.os_name = coalesce(base, 'name')
    obj.os_version = coalesce(base, 'version')
    obj.os_architecture = coalesce(base, 'architecture')
    obj.os_servicePackMajor = coalesce(base, 'servicePackMajor')
    obj.os_servicePackMinor = coalesce(base, 'servicePackMinor')
    obj.os_freePhysicalMemoryKB = coalesce(base, 'freePhysicalMemoryKB')
    obj.os_freeVirtualMemoryKB = coalesce(base, 'freeVirtualMemoryKB')
    obj.os_language_lcid = coalesce(base, ('LanguageInfo', 'lcid_string'))
    obj.os_locale_lcid = coalesce(base, ('LocaleInfo', 'lcid_string'))

    # This is an array of country info, and we'll use the last one.
    # (For the hacky reason that in the ['CA', 'PR', 'US'] case we want 'US'.)
    country_code_info = coalesce(base, 'CountryCodeInfo', [None])
    obj.os_country_code = coalesce(country_code_info[-1], 'country_code')

    base = coalesce(diagnostic_info, ('DiagnosticInfo', 'SystemInformation', 'NetworkInfo', 'Current', 'Internet'))
    obj.net_current_internet_connected = coalesce(base, 'internetConnected')
    obj.net_current_internet_conn_modem = coalesce(base, 'internetConnectionModem')
    obj.net_current_internet_conn_configured = coalesce(base, 'internetConnectionConfigured')
    obj.net_current_internet_conn_lan = coalesce(base, 'internetConnectionLAN')
    obj.net_current_internet_conn_proxy = coalesce(base, 'internetConnectionProxy')
    obj.net_current_internet_conn_offline = coalesce(base, 'internetConnectionOffline')
    obj.net_current_internet_ras_installed = coalesce(base, 'internetRASInstalled')

    base = coalesce(diagnostic_info, ('DiagnosticInfo', 'SystemInformation', 'NetworkInfo', 'Original', 'Internet'))
    obj.net_original_internet_connected = coalesce(base, 'internetConnected')
    obj.net_original_internet_conn_modem = coalesce(base, 'internetConnectionModem')
    obj.net_original_internet_conn_configured = coalesce(base, 'internetConnectionConfigured')
    obj.net_original_internet_conn_lan = coalesce(base, 'internetConnectionLAN')
    obj.net_original_internet_conn_proxy = coalesce(base, 'internetConnectionProxy')
    obj.net_original_internet_conn_offline = coalesce(base, 'internetConnectionOffline')
    obj.net_original_internet_ras_installed = coalesce(base, 'internetRASInstalled')

    base = coalesce(diagnostic_info, ('DiagnosticInfo', 'SystemInformation', 'NetworkInfo', 'Original', 'Proxy'), [])
    # There's an array of proxy info, typically one per network connection.
    # We don't need to export all of them, so we'll take the one that doesn't
    # have a named network connection.
    # FIX: was `filter(...)` followed by truthiness/indexing, which only
    # behaves correctly on Python 2 (where filter returns a list); a list
    # comprehension is equivalent on both Python 2 and 3.
    unnamed_proxies = [p for p in base if not coalesce(p, 'connectionName')]
    proxy = unnamed_proxies[0] if unnamed_proxies else (base[0] if base else None)
    obj.net_original_proxy_flags = coalesce(proxy, 'flags')
    obj.net_original_proxy_address = coalesce(proxy, 'proxy')
    obj.net_original_proxy_bypass = coalesce(proxy, 'bypass')
    obj.net_original_proxy_connectionName = coalesce(proxy, 'connectionName')

    base = coalesce(diagnostic_info, ('DiagnosticInfo', 'SystemInformation', 'Misc'))
    obj.misc_slowMachine = coalesce(base, 'slowMachine')
    obj.misc_mideastEnabled = coalesce(base, 'mideastEnabled')

    base = coalesce(diagnostic_info, ('DiagnosticInfo', 'SystemInformation', 'UserInfo'))
    obj.user_group_users = coalesce(base, 'inUsersGroup')
    obj.user_group_power = coalesce(base, 'inPowerUsersGroup')
    obj.user_group_guest = coalesce(base, 'inGuestsGroup')
    obj.user_group_admin = coalesce(base, 'inAdminsGroup')

    base = coalesce(diagnostic_info, ('DiagnosticInfo', 'SystemInformation', 'PsiphonInfo'))
    obj.psiphon_info_propagationChannelID = coalesce(base, 'PROPAGATION_CHANNEL_ID')
    obj.psiphon_info_sponsorID = coalesce(base, 'SPONSOR_ID')
    obj.psiphon_info_clientVersion = coalesce(base, 'CLIENT_VERSION')
    obj.psiphon_info_transport = coalesce(base, 'selectedTransport')
    obj.psiphon_info_splitTunnel = coalesce(base, 'splitTunnel')

    return obj
def _get_response_content(response_id, diagnostic_info):
    """Gets the response for the given response_id. diagnostic_info will be
    used to determine language and some content, but may be None.

    Returns a dict of the form:
        {
            subject: <subject text>,
            body_text: <body text>,
            body_html: <rich html body>,
            attachments: <attachments list, may be None>
        }

    Returns None if no response content can be derived.
    """
    logger.debug_log('_get_response_content: enter')

    sponsor_name = utils.coalesce(diagnostic_info,
                                  ['DiagnosticInfo', 'SystemInformation', 'PsiphonInfo', 'SPONSOR_ID'],
                                  required_types=utils.string_types)
    prop_channel_name = utils.coalesce(diagnostic_info,
                                       ['DiagnosticInfo', 'SystemInformation', 'PsiphonInfo', 'PROPAGATION_CHANNEL_ID'],
                                       required_types=utils.string_types)

    # Use default values if we couldn't get good user-specific values
    sponsor_name = sponsor_name or config['defaultSponsorName']
    prop_channel_name = prop_channel_name or config['defaultPropagationChannelName']

    lang_id = _get_lang_id_from_diagnostic_info(diagnostic_info)
    # lang_id may be None, if the language could not be determined

    # Read in all translations HTML
    response_translations = []
    for root, _, files in os.walk(_RESPONSES_DIR):
        for name in files:
            lang, ext = os.path.splitext(name)
            if ext != '.html':
                continue

            # 'master' is the source (English) translation file.
            if lang == 'master':
                lang = 'en'

            # NOTE: Python-2-style str.decode(); this module predates Py3.
            with open(os.path.join(root, name)) as translation_file:
                translation = translation_file.read().decode('utf-8')

            # Strip leading and trailing whitespace so that we don't get extra
            # text elements in our BeautifulSoup
            translation = translation.strip()

            response_translations.append((lang, translation.strip()))

    # Reorder the array according to the detected language and _TOP_LANGS
    def lang_sorter(item):
        # Detected language first (rank -1), then _TOP_LANGS order, then
        # everything else (rank 999).
        lang, _ = item
        rank = 999
        try:
            if lang == lang_id:
                rank = -1
            else:
                rank = _TOP_LANGS.index(lang)
        except ValueError:
            pass
        return rank

    response_translations.sort(key=lang_sorter)

    # Gather the info we'll need for formatting the email
    bucketname, email_address = \
        psi_ops_helpers.get_bucket_name_and_email_address(sponsor_name, prop_channel_name)

    # Use default values if we couldn't get good user-specific values
    if not bucketname or not email_address:
        default_bucketname, default_email_address = \
            psi_ops_helpers.get_bucket_name_and_email_address(
                config['defaultSponsorName'], config['defaultPropagationChannelName'])
        bucketname = bucketname or default_bucketname
        email_address = email_address or default_email_address

    # If, despite our best efforts, we still don't have a bucketname and
    # email address, just bail.
    if not bucketname or not email_address:
        logger.debug_log('_get_response_content: exiting due to no bucketname or address')
        return None

    # Collect the translations of the specific response we're sending
    subject = None
    bodies = []
    # NOTE(review): this loop variable reuses (and clobbers) the outer
    # `lang_id`; after the loop it holds the *last* translation's language,
    # which is what gets passed to _render_email below -- confirm intent.
    for lang_id, html in response_translations:
        soup = BeautifulSoup(html)

        if not subject:
            subject = soup.find(id='default_response_subject')
            if subject:
                # Strip outer element
                subject = u''.join(unicode(elem) for elem in subject.contents).strip()

        body = soup.find(id=response_id)
        if body:
            # Strip outer element
            body = u''.join(unicode(elem) for elem in body.contents).strip()

            # The user might be using a language for which there isn't a
            # download page. Fall back to English if that's the case.
            home_page_url = psi_ops_helpers.get_s3_bucket_home_page_url(
                bucketname,
                lang_id if lang_id in psi_ops_helpers.WEBSITE_LANGS else 'en')
            download_page_url = psi_ops_helpers.get_s3_bucket_download_page_url(
                bucketname,
                lang_id if lang_id in psi_ops_helpers.WEBSITE_LANGS else 'en')

            # Numeric keys are placeholders in the translated HTML.
            format_dict = {
                '0': email_address,
                '1': download_page_url,
                '2': home_page_url
            }

            body = unicode(body) % format_dict

            bodies.append(body)

    # Render the email body from the Mako template
    body_html = _render_email({
        'lang_id': lang_id,
        'response_id': response_id,
        'responses': bodies
    })

    # Get attachments.
    # This depends on which response we're returning.
    attachments = None
    if response_id == 'download_new_version_links':
        pass
    elif response_id == 'download_new_version_attachments':
        fp_windows = aws_helpers.get_s3_attachment('attachments',
                                                   bucketname,
                                                   psi_ops_helpers.DOWNLOAD_SITE_WINDOWS_BUILD_FILENAME)
        fp_android = aws_helpers.get_s3_attachment('attachments',
                                                   bucketname,
                                                   psi_ops_helpers.DOWNLOAD_SITE_ANDROID_BUILD_FILENAME)
        attachments = [(fp_windows, psi_ops_helpers.EMAIL_RESPONDER_WINDOWS_ATTACHMENT_FILENAME),
                       (fp_android, psi_ops_helpers.EMAIL_RESPONDER_ANDROID_ATTACHMENT_FILENAME)]
    else:
        pass

    logger.debug_log('_get_response_content: exit')

    return {
        'subject': subject,
        'body_text': _html_to_text(body_html),
        'body_html': body_html,
        'attachments': attachments
    }
def _get_response_content(response_id, diagnostic_info):
    """Gets the response for the given response_id. diagnostic_info will be
    used to determine language and some content, but may be None.

    Returns a dict of the form:
        {
            subject: <subject text>,
            body_text: <body text>,
            body_html: <rich html body>,
            attachments: <attachments list, may be None>
        }

    Returns None if no response content can be derived.
    """
    logger.debug_log('_get_response_content: enter')

    sponsor_name = utils.coalesce(diagnostic_info,
                                  ['DiagnosticInfo', 'SystemInformation', 'PsiphonInfo', 'SPONSOR_ID'],
                                  required_types=utils.string_types)
    prop_channel_name = utils.coalesce(diagnostic_info,
                                       ['DiagnosticInfo', 'SystemInformation', 'PsiphonInfo', 'PROPAGATION_CHANNEL_ID'],
                                       required_types=utils.string_types)

    # Use default values if we couldn't get good user-specific values
    sponsor_name = sponsor_name or config['defaultSponsorName']
    prop_channel_name = prop_channel_name or config['defaultPropagationChannelName']

    lang_id = _get_lang_id_from_diagnostic_info(diagnostic_info)
    # lang_id may be None, if the language could not be determined

    # Read in all translations HTML
    response_translations = []
    for root, _, files in os.walk(_RESPONSES_DIR):
        for name in files:
            lang, ext = os.path.splitext(name)
            if ext != '.html':
                continue

            # 'master' is the source (English) translation file.
            if lang == 'master':
                lang = 'en'

            # NOTE: Python-2-style str.decode(); this module predates Py3.
            with open(os.path.join(root, name)) as translation_file:
                translation = translation_file.read().decode('utf-8')

            # Strip leading and trailing whitespace so that we don't get extra
            # text elements in our BeautifulSoup
            translation = translation.strip()

            response_translations.append((lang, translation.strip()))

    # Reorder the array according to the detected language and _TOP_LANGS
    def lang_sorter(item):
        # Detected language first (rank -1), then _TOP_LANGS order, then
        # everything else (rank 999).
        lang, _ = item
        rank = 999
        try:
            if lang == lang_id:
                rank = -1
            else:
                rank = _TOP_LANGS.index(lang)
        except ValueError:
            pass
        return rank

    response_translations.sort(key=lang_sorter)

    # Gather the info we'll need for formatting the email
    bucketname, email_address = \
        psi_ops_helpers.get_bucket_name_and_email_address(sponsor_name, prop_channel_name)

    # Use default values if we couldn't get good user-specific values
    if not bucketname or not email_address:
        default_bucketname, default_email_address = \
            psi_ops_helpers.get_bucket_name_and_email_address(
                config['defaultSponsorName'], config['defaultPropagationChannelName'])
        bucketname = bucketname or default_bucketname
        email_address = email_address or default_email_address

    # If, despite our best efforts, we still don't have a bucketname and
    # email address, just bail.
    if not bucketname or not email_address:
        logger.debug_log('_get_response_content: exiting due to no bucketname or address')
        return None

    # Collect the translations of the specific response we're sending
    subject = None
    bodies = []
    # NOTE(review): this loop variable reuses (and clobbers) the outer
    # `lang_id`; after the loop it holds the *last* translation's language,
    # which is what gets passed to _render_email below -- confirm intent.
    for lang_id, html in response_translations:
        soup = BeautifulSoup(html)

        if not subject:
            subject = soup.find(id='default_response_subject')
            if subject:
                # Strip outer element
                subject = u''.join(unicode(elem) for elem in subject.contents).strip()

        body = soup.find(id=response_id)
        if body:
            # Strip outer element
            body = u''.join(unicode(elem) for elem in body.contents).strip()

            # The user might be using a language for which there isn't a
            # download page. Fall back to English if that's the case.
            home_page_url = psi_ops_helpers.get_s3_bucket_home_page_url(
                bucketname,
                lang_id if lang_id in psi_ops_helpers.WEBSITE_LANGS else 'en')
            download_page_url = psi_ops_helpers.get_s3_bucket_download_page_url(
                bucketname,
                lang_id if lang_id in psi_ops_helpers.WEBSITE_LANGS else 'en')
            faq_page_url = psi_ops_helpers.get_s3_bucket_faq_url(
                bucketname,
                lang_id if lang_id in psi_ops_helpers.WEBSITE_LANGS else 'en')

            # We're using numbers rather than more readable names here because
            # they're less likely to be accidentally modified by translators
            # (we think).
            format_dict = {
                '0': email_address,
                '1': download_page_url,
                '2': home_page_url,
                '3': faq_page_url
            }

            body = unicode(body) % format_dict

            bodies.append(body)

    # Render the email body from the Mako template
    body_html = _render_email({
        'lang_id': lang_id,
        'response_id': response_id,
        'responses': bodies
    })

    # Get attachments.
    # This depends on which response we're returning.
    attachments = None
    if response_id == 'download_new_version_links':
        pass
    elif response_id == 'download_new_version_attachments':
        fp_windows = aws_helpers.get_s3_attachment('attachments',
                                                   bucketname,
                                                   psi_ops_helpers.DOWNLOAD_SITE_WINDOWS_BUILD_FILENAME)
        fp_android = aws_helpers.get_s3_attachment('attachments',
                                                   bucketname,
                                                   psi_ops_helpers.DOWNLOAD_SITE_ANDROID_BUILD_FILENAME)
        attachments = [(fp_windows, psi_ops_helpers.EMAIL_RESPONDER_WINDOWS_ATTACHMENT_FILENAME),
                       (fp_android, psi_ops_helpers.EMAIL_RESPONDER_ANDROID_ATTACHMENT_FILENAME)]
    else:
        pass

    logger.debug_log('_get_response_content: exit')

    return {
        'subject': subject,
        'body_text': _html_to_text(body_html),
        'body_html': body_html,
        'attachments': attachments
    }
def _ensure_field_is_string(stringtype, data, fieldpath):
    """Coerce the value at `fieldpath` in `data` to `stringtype`, in place.

    Missing values (None) are left untouched.
    """
    current = utils.coalesce(data, fieldpath)
    if current is None:
        return
    utils.assign_value_to_obj_at_path(data, fieldpath, stringtype(current))
def create(cls, diagnostic_info):
    """Extract the Windows feedback survey answers.

    Returns None for non-Windows diagnostics or when there are no survey
    results.
    """
    # Android feedback gets submitted to the individual servers
    if coalesce(diagnostic_info, ('Metadata', 'platform')) != 'windows':
        return None

    results = coalesce(diagnostic_info, ('Feedback', 'Survey', 'results'))
    if not results:
        return None

    # FIX: these used `filter(...)` plus truthiness/indexing, which only
    # behaves correctly on Python 2 (where filter returns a list); list
    # comprehensions are equivalent on both Python 2 and 3.
    connectivity = [r for r in results if coalesce(r, 'title') == 'Connectivity']
    speed = [r for r in results if coalesce(r, 'title') == 'Speed']
    compatibility = [r for r in results if coalesce(r, 'title') == 'Compatibility']

    obj = cls()
    obj.connectivity = coalesce(connectivity[0], 'answer') if connectivity else None
    obj.speed = coalesce(speed[0], 'answer') if speed else None
    obj.compatibility = coalesce(compatibility[0], 'answer') if compatibility else None

    # If the user was never able to connect to a server, then this feedback
    # can't be considered to be for a particular server. But if the user
    # has been (or is) connected to a server, we'll use the one they last
    # connected to.
    diagnostic_history = coalesce(diagnostic_info,
                                  ('DiagnosticInfo', 'DiagnosticHistory'),
                                  [])
    # NOTE(review): we returned above unless platform == 'windows', so this
    # conditional always yields 'ConnectingServer' -- presumably leftover
    # from before the early platform check; confirm intent.
    connected_msg = 'ConnectedServer' if coalesce(diagnostic_info, ('Metadata', 'platform')) != 'windows' else 'ConnectingServer'
    connected_entries = [entry for entry in diagnostic_history
                         if coalesce(entry, 'msg') == connected_msg]
    if connected_entries:
        obj.server_id = coalesce(connected_entries[-1], ('data', 'ipAddress'))

    return obj
def create(cls, diagnostic_info):
    """Build an Android system-info record; None for non-Android data."""
    if coalesce(diagnostic_info, ('Metadata', 'platform')) != 'android':
        return None

    obj = cls()

    base = coalesce(diagnostic_info, ('DiagnosticInfo', 'SystemInformation'))

    # Attribute name -> path under SystemInformation.
    field_paths = (
        ('isRooted', 'isRooted'),
        ('language', 'language'),
        ('networkTypeName', 'networkTypeName'),
        ('sys_build_tags', ('Build', 'TAGS')),
        ('sys_build_brand', ('Build', 'BRAND')),
        ('sys_build_version_release', ('Build', 'VERSION__RELEASE')),
        ('sys_build_version_codename', ('Build', 'VERSION__CODENAME')),
        ('sys_build_version_sdk', ('Build', 'VERSION__SDK_INT')),
        ('sys_build_cpu_abi', ('Build', 'CPU_ABI')),
        ('sys_build_model', ('Build', 'MODEL')),
        ('sys_build_manufacturer', ('Build', 'MANUFACTURER')),
        ('psiphon_info_sponsorID', ('PsiphonInfo', 'SPONSOR_ID')),
        ('psiphon_info_propagationChannelID', ('PsiphonInfo', 'PROPAGATION_CHANNEL_ID')),
        ('psiphon_info_clientVersion', ('PsiphonInfo', 'CLIENT_VERSION')),
    )
    for attr, path in field_paths:
        setattr(obj, attr, coalesce(base, path))

    BROWSER_ONLY_STATUS_ID = 'psiphon_running_browser_only'
    CONNECTION_STATUS_IDS = (BROWSER_ONLY_STATUS_ID, 'psiphon_running_whole_device')

    status_history = coalesce(diagnostic_info,
                              ('DiagnosticInfo', 'StatusHistory'),
                              default_value=[],
                              required_types=(list, tuple))
    connection_log_ids = [coalesce(s, 'id') for s in status_history
                          if coalesce(s, 'id') in CONNECTION_STATUS_IDS]

    if connection_log_ids:
        # The last connection log is the one we'll use as representative
        obj.browserOnly = connection_log_ids[-1] == BROWSER_ONLY_STATUS_ID

    return obj
def test_coalesce_should_return_none_if_no_non_empty_values():
    """coalesce over only empty values yields None."""
    assert coalesce(*EMPTY_VALUES) is None
def test_coalesce_should_return_first_non_empty_value():
    """A trailing non-empty value is returned when all others are empty."""
    assert coalesce(*EMPTY_VALUES, 1) == 1
def __init__(self, operators=None, allow_null=True):
    # Whether null values are permitted by this instance.
    self.allow_null = allow_null
    # Fall back to the class-level `operators` default when none are
    # supplied. (Exact coalesce semantics are defined elsewhere --
    # presumably first non-None/non-empty argument wins; confirm.)
    self.operators = coalesce(operators, self.operators)