def db_reset_all_hashtags(db=None, hashtag=None, attr=None):
    """Unset ``attr`` on every hashtag document and stamp them as modified.

    Args:
        db: open (Mongo-style) database connection; required.
        hashtag: unused here; kept so the existing call interface is unchanged.
        attr: name of the field to ``$unset`` on every document.
            BUG FIX: the original body referenced a global ``attr`` that was
            never defined, so every call raised NameError which the bare
            ``except:`` silently turned into ``return False``. Callers pass
            ``attr=...`` through the plugin layer; it is now a real parameter.

    Returns:
        True when the bulk update succeeds, False on any error (best-effort:
        callers only see a boolean success flag).
    """
    mongo_db_name = environ.get('mongo_db_name')
    mongo_hashtags_col_name = environ.get('mongo_hashtags_col_name')
    assert vyperapi.is_not_none(db), 'There is no db.'
    assert vyperapi.is_not_none(mongo_db_name), 'There is no mongo_db_name.'
    assert vyperapi.is_not_none(mongo_hashtags_col_name), 'There is no mongo_hashtags_col_name.'
    tb_name = mongo_db_name
    col_name = mongo_hashtags_col_name
    table = db[tb_name]
    coll = table[col_name]
    try:
        ts_time = _utils.timeStamp(offset=0, use_iso=True)
        # Aggregation-pipeline update: drop the attribute, record change time.
        coll.updateMany({}, [{"$unset": attr}, {"$set": {"modified": ts_time}}])
        return True
    except Exception:
        return False
def most_recent_number_of_days(bucket, num_days=30):
    '''Filter ``bucket`` down to the ISO timestamps from the most recent
    ``num_days`` days (with one extra hour of slack for DST / clock bias).

    ``bucket`` may be a list of ISO timestamp strings or a dict keyed by
    them; the result has the same container type. Keys that do not look
    like ISO timestamps (no "T" separator) are skipped.
    '''
    period_secs = normalize_int_from_str(num_days) * 24 * 60 * 60
    cutoff = datetime.fromisoformat(
        _utils.timeStamp(offset=-period_secs, use_iso=True))
    period_secs += 3600  # one hour of slack: DST issue? Or translation bias?
    if isinstance(bucket, list):
        new_bucket = []
        candidates = bucket
    elif isinstance(bucket, dict):
        new_bucket = {}
        candidates = bucket.keys()
    else:
        new_bucket = None
        candidates = []
    if new_bucket is not None:
        for ts in candidates:
            if len(ts.split('T')) <= 1:
                continue  # not an ISO timestamp; ignore
            elapsed = (datetime.fromisoformat(ts) - cutoff).total_seconds()
            if elapsed < period_secs:
                if isinstance(bucket, list):
                    new_bucket.append(ts)
                else:
                    new_bucket[ts] = bucket.get(ts)
    assert len(new_bucket) > 0, 'There cannot be less than one item after an update.'
    return new_bucket
def rotate(self): _fname = self.logFileName logging.info('Rotate Log for "%s".' % (_fname)) ts = _utils.timeStamp() if (sys.platform == 'win32'): ts = ts.replace(':', '-') _fh = open('%s_%s.log' % ('_'.join(_fname.split('_')[0:-1]), ts), 'w') self.f.flush() self.f.seek(0) _fh.writelines('\n'.join(self.f.readlines())) _fh.close() self.f.truncate()
def __init__(self, *args, **kwargs): self.callback = dummy_callback smtpd.SMTPServer.__init__(self, *args, **kwargs) self.mailboxFile = None self.multi_file = True self.is_debugging = False self.use_html = True self.author = 'Vyper Logix Corp. <http://www.vyperlogix.com>' self.__author__ = self.author self.copyright = '(c). Copyright 1990-%s, Vyper Logix Corp., All Right Reserved., You must ontain the right to use.' % ( _utils.timeStamp(format=_utils.formatDate_YYYY())) self.__copyright__ = self.copyright self.cwd = os.path.abspath(os.curdir)
def record_results(self,results): if (results != None): _name = misc.funcName() _dbx = self.dbx ts = _utils.timeStamp() nk1 = len(_dbx.keys()) _keys = _dbx.normalizedSortedKeys() if (len(_keys) > 0): try: _last_i = int(_keys[0].split(',')[0]) except: _last_i = -1 else: _last_i = 0 if (_last_i > -1): _dbx['%d,%s' % (_last_i+1,ts)] = results nk2 = len(_dbx.keys()) logging.info('(%s) :: There were %d key%s but now %d key%s in the file named %s.' % (_name,nk1,'s' if nk1 > 1 else '',nk2,'s' if nk2 > 1 else '',self.c_dbxName)) for k in _keys[0:5]: logging.info('(%s) :: %s.' % (_name,k)) _dbx.sync() _dbx.close() else: logging.warning('Unable to record the results due to there are no results to record because results are of type "%s".' % (type(results)))
    def process_message(self, peer, mailfrom, rcpttos, data):
        # Python-2 code (uses "print >>" statements). Handle one inbound SMTP
        # message: parse it, persist it to a mailbox file, then invoke the
        # user-supplied callback. Every stage logs-and-continues on error.
        # Unwrap single-element lists produced by the message parser.
        f_unpack = lambda foo, key: foo[key][0] if (misc.isList(foo[key])) else foo[key]
        try:
            SmtpMailsinkServer._count_messages += 1
            try:
                d = self.parseMessage(data)
            except:
                # NOTE(review): if parsing fails, "d" is never bound and the
                # f_unpack calls below raise NameError (also swallowed by the
                # outermost except).
                exc_info = sys.exc_info()
                info_string = '\n'.join(traceback.format_exception(*exc_info))
                logMessage(info_string, ObjectTypeName.typeName(self), misc.funcName(), _logging=standardLogging.LoggingLevels.error)
            if (self.multi_file):
                # One file per message: mailboxes/<recipient>/<timestamp>.<ext>
                _toAddr = f_unpack(d, const_to_symbol)
                _path = _utils.safely_mkdir(fpath=self.cwd, dirname=os.sep.join(['mailboxes', _toAddr]))
                _fMbx = self.mailboxFile  # remember the shared handle; restored below
                self.mailboxFile = open('%s.%s' % (os.sep.join([_path, _utils.timeStamp().replace(':', '')]), 'html' if (self.use_html) else 'txt'), 'w')
                info_string = 'DEBUG: self.mailboxFile is "%s".' % (self.mailboxFile)
                logMessage(info_string, ObjectTypeName.typeName(self), misc.funcName(), _logging=standardLogging.LoggingLevels.error)
            if self.mailboxFile is not None:
                #_utils.print_stderrout("Mail From is %s\n" % mailfrom)
                try:
                    # Render the message into an in-memory buffer first, then
                    # write it out in one shot.
                    io_buffer = _utils.stringIO()
                    if (self.is_debugging):
                        d.prettyPrint(prefix='', title='Mail Parts', fOut=io_buffer)
                    else:
                        if (self.use_html):
                            print >> io_buffer, self.renderHTML(f_unpack(d, const_subject_symbol), f_unpack(d, const_body_symbol))
                        else:
                            print >> io_buffer, '%s' % (f_unpack(d, const_subject_symbol))
                            print >> io_buffer, '%s' % ('\n'.join(f_unpack(d, const_body_symbol)))
                    s = io_buffer.getvalue()
                    self.mailboxFile.write(s)
                except:
                    exc_info = sys.exc_info()
                    info_string = '\n'.join(traceback.format_exception(*exc_info))
                    logMessage(info_string, ObjectTypeName.typeName(self), misc.funcName(), _logging=standardLogging.LoggingLevels.error)
                #self.mailboxFile.write( "="*80 )
                #self.mailboxFile.write( "\n\n" )
                self.mailboxFile.flush()
                if (self.multi_file):
                    # Close the per-message file and restore the shared handle.
                    self.mailboxFile.close()
                    self.mailboxFile = _fMbx
            else:
                print >> sys.stderr, 'ERROR: self.mailboxFile is "%s".' % (self.mailboxFile)
            if (callable(self.callback)):
                try:
                    info_string = 'DEBUG: mailfrom is "%s", rcpttos is "%s".' % (mailfrom, rcpttos)
                    logMessage(info_string, ObjectTypeName.typeName(self), misc.funcName(), _logging=standardLogging.LoggingLevels.error)
                    self.callback(d, mailfrom, rcpttos, data)
                except:
                    exc_info = sys.exc_info()
                    info_string = '\n'.join(traceback.format_exception(*exc_info))
                    logMessage(info_string, ObjectTypeName.typeName(self), misc.funcName(), _logging=standardLogging.LoggingLevels.error)
            else:
                logMessage('Cannot issue callback because callback is not callable.', ObjectTypeName.typeName(self), misc.funcName(), _logging=standardLogging.LoggingLevels.warning)
            info_string = 'SmtpMailsinkServer._count_messages=%d' % (SmtpMailsinkServer._count_messages)
            logMessage(info_string, ObjectTypeName.typeName(self), misc.funcName(), _logging=standardLogging.LoggingLevels.info)
        except:
            exc_info = sys.exc_info()
            info_string = '\n'.join(traceback.format_exception(*exc_info))
            logMessage(info_string, ObjectTypeName.typeName(self), misc.funcName(), _logging=standardLogging.LoggingLevels.error)
    def renderHTML(self, subj, body):
        # Render an email (subject + list of body lines) as a complete
        # HTML 4.0 Transitional document; on un-coercible arguments only a
        # warning is logged and a mostly-empty document is returned.
        from vyperlogix.html import myOOHTML as oohtml
        h_html = oohtml.Html()

        def renderBody(_body):
            # Build a standalone content DIV holding the body lines with a
            # <BR> between consecutive lines.
            h = oohtml.Html()
            h_html = h.tag(oohtml.oohtml.HTML)
            h_body = h_html.tag(oohtml.oohtml.BODY)
            h_Content = h_body.tag(oohtml.oohtml.DIV, id="content", style="background-color: white")
            # NOTE(review): a single-element list is unwrapped to its element;
            # if that element is a string, _body[0] below is its FIRST
            # CHARACTER and the loop emits one <BR> per remaining character —
            # confirm whether this is intended.
            _body = _body[0] if (misc.isList(_body)) and (len(_body) == 1) else _body
            h_Content.text(_body[0])
            if (len(_body) > 1):
                for b in _body[1:]:
                    h_Content.tagOp(oohtml.oohtml.BR)
                    h_Content.text(b)
            return h_Content.toHtml()

        # Coerce arguments: subject must be a string, body must be a list.
        if (not misc.isString(subj)):
            subj = str(subj)
        if (not misc.isList(body)):
            body = [body]
        if (misc.isString(subj)) and (misc.isList(body)):
            h_html.text(oohtml.oohtml.DOCTYPE_40_TRANSITIONAL)
            _title = "Vyper Logix SMTP Email Proxy (%s v.%s)" % (ObjectTypeName.typeName(self), __version__)
            html_html = h_html.tag(oohtml.oohtml.HTML)
            head_html = html_html.tag(oohtml.oohtml.HEAD)
            # HEAD: content-type, meta tags, title.
            head_html.tagOp(oohtml.oohtml.META, http_equiv=oohtml.oohtml.CONTENT_TYPE, content=oohtml.oohtml.TEXT_HTML_CHARSET_ISO_8859_1)
            head_html.metas((oohtml.oohtml.AUTHOR, '%s :: %s' % (self.author, self.__author__)), (oohtml.oohtml.KEYWORDS, _title), (oohtml.oohtml.DESCRIPTION, "The contents of this email are considered to be confidential unless otherwise specified."), (oohtml.oohtml.ROBOTS, oohtml.oohtml.ALL))
            head_html.tagTITLE('©%s, Vyper Logix Corp., All Rights Reserved., %s' % (_utils.timeStamp(format=_utils.formatDate_YYYY()), _title))
            body_html = html_html.tag(oohtml.oohtml.BODY)
            idContent = body_html.tag(oohtml.oohtml.DIV, id="content", style="background-color: white")
            # BODY: one table with subject, rendered body, spacer, copyright.
            rows = []
            rows.append(tuple(['%s' % (subj)]))
            rows.append(tuple([renderBody(body)]))
            rows.append(tuple(['<BR/><BR/><BR/><BR/>']))
            rows.append(tuple([self.copyright if (misc.isString(self.copyright)) and (len(self.copyright) > 0) else self.__copyright__]))
            idContent.html_table(rows)
            pass
        else:
            logMessage('subj must be of type str and body must be of type list rather than of types "%s" and "%s", respectively.' % (type(subj), type(body)), ObjectTypeName.typeName(self), misc.funcName(), _logging=standardLogging.LoggingLevels.warning)
        return h_html.toHtml()
def __handle_one_available_hashtag(api=None, service_runner=None, environ=None, logger=None):
    # Pick one hashtag document that has not yet been processed for followers
    # (no "last_followers" field), follow the authors of tweets matching it
    # (paced to respect rate limits); if nothing was followed, retire the
    # hashtag. When no unprocessed hashtag remains, reset the whole set.
    ts_follower_time = _utils.timeStamp(offset=0, use_iso=True)  # NOTE(review): appears unused
    assert service_runner, 'Missing service_runner.'
    assert environ, 'Missing environ.'
    me = api.me()
    if (0):
        # Dead code kept for reference: walk the word cloud instead of
        # querying for the first hashtag lacking "last_followers".
        words = service_runner.exec(word_cloud, get_final_word_cloud, **plugins_handler.get_kwargs(environ=environ, callback=None, logger=logger))
        for k, v in words.get('word-cloud', {}).items():
            doc = service_runner.exec(word_cloud, get_hashtag_matching, **plugins_handler.get_kwargs(hashtag=k, environ=environ, logger=logger))
            if (doc) and (not doc.get(last_followers)):
                pass
    # First hashtag document that has never been processed for followers.
    doc = service_runner.exec(word_cloud, get_hashtag_matching, **plugins_handler.get_kwargs(criteria={last_followers: {"$exists": False}}, environ=environ, logger=logger))
    if (doc) and (not doc.get(last_followers)):
        hashtag = doc.get('hashtag')
        if (hashtag):
            hashtag_count = 0
            # Ensure the search term carries a leading '#'.
            h = '{}{}'.format('#' if (hashtag.find('#') == -1) else '', hashtag)
            for tweeter in tweepy.Cursor(api.search, q=h).items():
                # Check the friendship in both directions before following.
                friends1 = api.show_friendship(source_screen_name=tweeter.screen_name, target_screen_name=me.screen_name)
                friends2 = api.show_friendship(source_screen_name=me.screen_name, target_screen_name=tweeter.screen_name)
                if (not any([f.following for f in friends1])) or (not any([f.following for f in friends2])):
                    api.create_friendship(screen_name=tweeter.screen_name)
                    hashtag_count += 1
                    # Pace the follow requests (seconds between follows).
                    time.sleep(environ.get('hashtags_followers_pace', 60))
                    if (logger):
                        logger.info('followed {}'.format(tweeter.screen_name))
                if (api.is_rate_limit_blown):
                    if (logger):
                        logger.info('Twitter rate limit was blown.')
                    break
            if (hashtag_count == 0):
                # Nothing followed for this hashtag: delete it.
                status = service_runner.exec(word_cloud, delete_one_hashtag, **plugins_handler.get_kwargs(environ=environ, hashtag=hashtag, logger=logger))
                assert status, 'Did not delete the hashtags {} for followers.'.format(hashtag)
                if (logger):
                    logger.warning('Resetting hashtags for new followers.')
    else:
        # No unprocessed hashtag left: clear "last_followers" everywhere so
        # the cycle can start over.
        status = service_runner.exec(word_cloud, reset_all_hashtags, **plugins_handler.get_kwargs(environ=environ, attr=last_followers, logger=logger))
        assert status, 'Did not reset all the hashtags for followers.'
        if (logger):
            logger.warning('Resetting hashtags for new followers.')
def _render_the_page(request, _title, template_name, navigation_menu_type, navigation_tabs, styles_context=None, context=None, footer_context=None, template_folder='', js=None, head=None):
    '''Render a complete Django site page from partial templates.

    request is the Django request.
    _title is the title for the site.
    template_name is the template filename for the body of the content, may be a partial.
    navigation_menu_type is the tabs menu type for the site navigation bar.
    navigation_tabs is the list of tabs.
    context is the Context for the main body of the site per the template_name.
    template_folder is the prefix for the folder in which the templates reside.
    (template_name may reside in a different folder than the rest of the templates)
    js is a list of script URLs injected into ADDITIONAL_JS; head is a list of
    raw strings appended after the tabs header.

    Returns the rendered HTML string.
    '''
    import urllib
    from vyperlogix.misc import _utils
    from vyperlogix.django import tabs
    # BUG FIX: the old signature used mutable defaults ({} / []) and this
    # function MUTATES styles_context['ADDITIONAL_JS'] below, so injected
    # scripts leaked across calls that relied on the default. None-defaults
    # keep the external interface backward-compatible.
    styles_context = {} if styles_context is None else styles_context
    context = {} if context is None else context
    footer_context = {} if footer_context is None else footer_context
    js = [] if js is None else js
    head = [] if head is None else head
    now = _utils.timeStamp(format=formatTimeStr())
    _yyyy = _utils.timeStamp(format=formatYYYYStr())
    url_toks = [urllib.unquote_plus(t) for t in request.path.split('/') if (len(t) > 0)]
    # Build <script> tags for each requested js URL; best-effort.
    h = oohtml.Html()
    try:
        for j in js:
            h.tagSCRIPT(src=j)
    except:
        pass
    js_head = h.toHtml()
    # Prepend the script tags to any ADDITIONAL_JS the caller supplied.
    # (".get" replaces the Python-2-only has_key call.)
    styles_context['ADDITIONAL_JS'] = '%s%s' % (js_head, styles_context.get('ADDITIONAL_JS', ''))
    t_styles = get_template(template_filename(template_folder, '_styles.html'))
    html_styles = t_styles.render(Context(styles_context))
    t_tabs_header = get_template(template_filename(template_folder, '_tabs_header.html'))
    c = Context({'id': tabs.tab_num_from_url(url_toks[0] if (len(url_toks) > 0) else '/', navigation_tabs)})
    c.update(styles_context)
    html_tabs_header = t_tabs_header.render(c)
    head_content = ''
    try:
        for item in head:
            head_content += item
    except:
        pass
    # Expiry stamp 20 years in the PAST — presumably to defeat caching;
    # TODO confirm against the header template's meta tags.
    _delta = datetime.timedelta(days=365.25 * 20)
    _dt = datetime.datetime.fromtimestamp(time.time())
    _expires_time = _dt - _delta
    _expires_ts = time.strftime(formatMetaDateTimeStr(), _expires_time.timetuple())
    _last_modified_ts = time.strftime(formatMetaDateTimeStr(), _dt.timetuple())
    t_header = get_template(template_filename(template_folder, '_header_for_content.html'))
    c = Context({'the_title': '%s (%s)' % (_title, now), 'STYLES_CONTENT': html_styles, 'TABS_HEADER': html_tabs_header + head_content, 'EXPIRES_TIME': _expires_ts, 'LAST_MODIFIED': _last_modified_ts})
    c.update(context)
    html_header = t_header.render(c)
    t_footer = get_template(template_filename(template_folder, '_footer_for_content.html'))
    c = Context({'current_year': _yyyy})
    c.update(footer_context)
    html_footer = t_footer.render(c)
    t_content = get_template(template_name)
    html_content = t_content.render(Context(context))
    t_tabs_content = get_template(template_filename(template_folder, '_tabs_content.html'))
    html_tabs_content = t_tabs_content.render(Context({'MENU_TYPE': navigation_menu_type, 'NAVIGATION_TABS': get_tabs_nav_html(navigation_tabs), 'NAVIGATION_CONTENT': get_tabs_nav_content_html(navigation_tabs)}))
    # Final assembly: stitch header, footer, content, and tab panes together.
    t = get_template(template_filename(template_folder, 'site_content_template.html'))
    c = Context({'current_year': _yyyy, 'the_title': _title, 'HEADER_FOR_CONTENT': html_header, 'FOOTER_FOR_CONTENT': html_footer, 'CONTENT': html_content, 'TABS_CONTENT': html_tabs_content})
    html = t.render(c)
    return html
def analyse_account_plan(*args, **kwargs):
    # Analyse a tweet plan for an account: tally tweets per object id
    # (advert vs article), compute timing statistics (seconds offset from
    # the earliest tweet), optionally dump two JSON reports, and (disabled)
    # plot per-article tweet counts.
    import matplotlib.pyplot as plt
    import numpy as np
    import matplotlib.pyplot as plt  # NOTE(review): duplicate import
    from collections import namedtuple
    tweet_factory = lambda: namedtuple('Tweet', ['ts', 'd_ts', 'num', 'delta_secs'])
    # Inputs arrive nested under kwargs['kwargs'].
    _all_articles = kwargs.get('kwargs', {}).get('all_articles', [])
    s_all_articles = set(_all_articles)
    _articles = kwargs.get('kwargs', {}).get('articles', [])
    s__articles = set(_articles)
    _adverts = kwargs.get('kwargs', {}).get('adverts', [])
    s_adverts = set(_adverts)
    _json_path = kwargs.get('kwargs', {}).get('json_path')
    s_articles = set(_articles if (_articles) else [])
    has_articles = False if (len(s_articles) == 0) else True
    account = kwargs.get('account')
    if (account):
        ts = _utils.timeStamp(offset=0, use_iso=True)
        min_ts = _utils.getFromNativeTimeStamp(ts)
        max_ts = _utils.getFromNativeTimeStamp(ts)
        the_plans = {}
        total_tweets_adverts = 0
        total_tweets_articles = 0
        plans = account.get(__plans__, {})
        _objects = []
        _objects_nums = []
        count_adverts = 0
        count_non_adverts = 0
        # First pass: bucket tweets by object id and track the time range.
        for _id, details in plans.items():
            if (not str(_id)[0].isdigit()):
                continue  # skip non-numeric plan keys (metadata entries)
            _objects.append(_id)
            # Every planned article must appear; leftovers are asserted below.
            s_articles.discard(_id)
            num_for_object = 0
            #_article = __get_articles(_id=_id, environ=environ, tenant_id=twitter_bot_account.tenant_id, mongo_db_name=twitter_bot_account.mongo_db_name, mongo_articles_col_name=twitter_bot_account.mongo_articles_col_name, logger=logger)
            __is_advert__ = _id in s_adverts
            if (__is_advert__):
                count_adverts += 1
            else:
                count_non_adverts += 1
            for ts, num in details.items():
                d_ts = _utils.getFromNativeTimeStamp(ts, format=None)
                min_ts = min(min_ts, d_ts)
                max_ts = max(max_ts, d_ts)
                bucket = the_plans.get(_id, {})
                Tweet = tweet_factory()
                if (__is_advert__):
                    total_tweets_adverts += num
                else:
                    total_tweets_articles += num
                num_for_object += num
                bucket[ts] = Tweet(ts=ts, d_ts=d_ts, num=num, delta_secs=0)
                the_plans[_id] = bucket
            _objects_nums.append(num_for_object)
        print('min_ts -> {}, max_ts -> {}'.format(min_ts, max_ts))
        print('count_adverts -> {}, count_non_adverts -> {}'.format(count_adverts, count_non_adverts))
        print('count of _adverts -> {}, count of _articles -> {}'.format(len(_adverts), len(_articles)))
        stats_adverts_min_secs = sys.maxsize
        stats_adverts_max_secs = -sys.maxsize
        stats_articles_min_secs = sys.maxsize
        stats_articles_max_secs = -sys.maxsize
        discreet_steps_adverts = []
        discreet_steps_articles = []
        # Second pass: compute each tweet's offset (seconds since min_ts)
        # and min/max/discrete-step stats per category.
        for _id, details in the_plans.items():
            __is_advert__ = _id in s_adverts
            for ts, tweet in details.items():
                assert tweet.d_ts >= min_ts, '(1) Problem with # {} d_ts {} (ts {}) out of range of min_ts {}.'.format(_id, tweet.d_ts, ts, min_ts)
                assert tweet.d_ts <= max_ts, '(2) Problem with # {} d_ts {} (ts {}) out of range of max_ts {}.'.format(_id, tweet.d_ts, ts, max_ts)
                secs = tweet.d_ts - min_ts
                # NOTE(review): timedelta.seconds wraps every 24h — if plans
                # span more than a day this likely should be total_seconds();
                # confirm intended range before changing.
                secs = secs.seconds
                bucket = the_plans.get(_id, {})
                Tweet = tweet_factory()
                bucket[ts] = Tweet(ts=ts, d_ts=tweet.d_ts, num=tweet.num, delta_secs=secs)
                if (__is_advert__):
                    stats_adverts_min_secs = min(stats_adverts_min_secs, secs)
                    stats_adverts_max_secs = max(stats_adverts_max_secs, secs)
                    discreet_steps_adverts.append(secs)
                else:
                    stats_articles_min_secs = min(stats_articles_min_secs, secs)
                    stats_articles_max_secs = max(stats_articles_max_secs, secs)
                    discreet_steps_articles.append(secs)
                the_plans[_id] = bucket
        # De-duplicate offsets: each unique offset is one "discrete step".
        discreet_steps_adverts = set(discreet_steps_adverts)
        discreet_steps_articles = set(discreet_steps_articles)
        if (is_really_a_string(_json_path)):
            print('_json_path -> {}'.format(_json_path))
            data = {
                'adverts': {
                    'count_adverts': count_adverts,
                    'len_adverts': len(_adverts),
                    'stats_adverts_min_secs': stats_adverts_min_secs,
                    'stats_adverts_max_secs': stats_adverts_max_secs,
                    'discreet_steps_adverts': len(discreet_steps_adverts),
                    'total_tweets_adverts': total_tweets_adverts
                },
                'articles': {
                    'count_non_adverts': count_non_adverts,
                    'len_non_adverts': len(_articles),
                    'stats_articles_min_secs': stats_articles_min_secs,
                    'stats_articles_max_secs': stats_articles_max_secs,
                    'discreet_steps_articles': len(discreet_steps_articles),
                    'total_tweets_articles': total_tweets_articles
                },
                'summary': {
                    # Share of unique step offsets relative to all tweets.
                    'adverts_velocity': '%2.2f%%' % ((len(discreet_steps_adverts) / (total_tweets_adverts + total_tweets_articles)) * 100.0),
                    'articles_velocity': '%2.2f%%' % ((len(discreet_steps_articles) / (total_tweets_adverts + total_tweets_articles)) * 100.0)
                }
            }
            # Write the stats report (<path>_1.<ext>) and the raw plans
            # (<path>_2.<ext>) next to the requested json path.
            toks = os.path.splitext(_json_path)
            _json_path1 = '{}_{}{}'.format(toks[0], 1, toks[-1])
            with open(_json_path1, 'w') as fOut:
                print(json.dumps(data, indent=3), file=fOut)
            _json_path2 = '{}_{}{}'.format(toks[0], 2, toks[-1])
            with open(_json_path2, 'w') as fOut:
                print(json.dumps(plans, indent=3), file=fOut)
        print('Adverts: min_secs -> {}, max_secs -> {}, number of discreet_steps {}'.format(stats_adverts_min_secs, stats_adverts_max_secs, len(discreet_steps_adverts)))
        print('Articles: min_secs -> {}, max_secs -> {}, number of discreet_steps {}'.format(stats_articles_min_secs, stats_articles_max_secs, len(discreet_steps_articles)))
        # NOTE(review): the assert checks for EMPTY but its message says
        # "Expected s_articles to NOT be empty" — the message contradicts the
        # condition; confirm which one is intended.
        assert len(s_articles) == 0, 'Expected s_articles to NOT be empty but it has {} items. This is a problem.'.format(len(s_articles))
        print('Adverts: There were {} total tweets.'.format(total_tweets_adverts))
        print('Articles: There were {} total tweets.'.format(total_tweets_articles))
        if (0):
            # Disabled diagnostic bar chart of tweet counts per object.
            plt.rcdefaults()
            y_pos = np.arange(len(_objects))
            plt.bar(y_pos, _objects_nums, align='center', alpha=0.5)
            plt.xticks(y_pos, _objects)
            plt.ylabel('Num Tweets')
            plt.title('Tweet Counts by Article')
            plt.show()
        print('DEBUG')