def random_reply():
    """Pick a random hot submission from /r/all and reply to it (or to one
    of its comments) using that subreddit's Markov brain.

    Side effects: may call learn() to build a brain db for an unseen
    subreddit, and posts a reply through the praw API.  All errors are
    logged and swallowed so the bot's main loop keeps running.
    """
    log.info('making random reply')
    # Choose a random submission from /r/all that is currently hot
    submission = random.choice(list(api.subreddit('all').hot()))
    # Replace the "MoreReplies" stubs with the actual comment objects
    submission.comments.replace_more(limit=0)
    sub_name = submission.subreddit.display_name
    brain = "{}/{}.db".format(DB_DIR, sub_name)
    # No brain database for this subreddit yet -> learn it first
    if not glob.glob(brain):
        learn(sub_name)
    reply_brain = bot.Brain(brain)
    try:
        if prob(.35):  # There's a larger chance that we'll reply to a comment.
            log.info('replying to a comment')
            comment = random.choice(submission.comments.list())
            response = reply_brain.reply(comment.body)
            reply = comment.reply(response)
            # BUG FIX: was '...'.fomrat(comment) -> AttributeError at runtime,
            # which the broad except below silently turned into a log entry.
            log.info('Replied to comment: {}'.format(comment))
            log.info('Replied with: {}'.format(reply))
        else:
            log.info('replying to a submission')
            # Pass the submission title to the chat brain asking for a reply
            response = reply_brain.reply(submission.title)
            submission.reply(response)
            log.info('Replied to Title: {}'.format(submission.title))
            log.info('Replied with: {}'.format(response))
    except Exception as e:
        # Best-effort bot action: log and carry on
        log.error(e, exc_info=False)
def visualize(self, data_loader, model, classes, _dir):
    """Generate saliency maps for every image in *data_loader* and save the
    original / blended / heatmap variants under ``_dir/saliency_maps``.

    File names encode a running tile index, the class label, and the model's
    probability for that class.
    """
    tile_idx = 0
    bar = tqdm(data_loader, desc="visualizing", total=np.math.ceil(len(data_loader)))
    for image, label, image_path in bar:
        model.zero_grad()
        label = label[0]            # unwrap the batch dimension
        image_path = image_path[0]
        image = Variable(image).cuda()
        seq_out = model(image)
        tile_name = "t" + str(tile_idx)
        # tracker = SummaryTracker()
        cams = self.gen_cam_saliency(image, model, 'features', classes)
        out_root = os.path.join(_dir, "saliency_maps")
        for cls_idx, (orig, result, heatmap) in enumerate(cams):
            os.makedirs(out_root, exist_ok=True)
            score = prob(seq_out, cls_idx, decimals=4)
            if len(score) == 1:
                score = score[0]
            variants = (("original", orig), ("blend", result), ("heatmap", heatmap))
            for variant, img in variants:
                self.save_image(
                    save_dir=os.path.join(out_root, Path(image_path).stem),
                    file_name="{0}_{1}_({2})_{3}.png".format(
                        tile_name, classes[cls_idx], str(score), variant),
                    image=img)
        tile_idx += 1
async def run_monsters(self):
    """Possibly start a monster battle, then poll until it ends.

    A battle starts when test mode (``self.montest``) is on or a random
    roll passes, and only if the respawn cooldown has elapsed.  While a
    battle is running the status message is refreshed every 3 seconds.
    """
    import asyncio  # local import so this fix stays self-contained

    if self.montest or prob(self.probability):
        print("HIT - should we play?")
        if self.montest or int(time.time()) > (self.last_run + self.respawn_limit):
            await self.start_battle()
        else:
            print(
                f"Too Soon... last run: {self.last_run}, current: {time.time()}"
            )
        # Poll until the battle is over, refreshing the status message.
        # NOTE(review): original polled even when the battle was skipped as
        # "too soon" — structure preserved; confirm battle_over() handles it.
        while True:
            if self.battle_over():
                await self.end_battle()
                break
            await self.monster_message.edit(content=self.mm_formated())
            # BUG FIX: time.sleep() inside a coroutine blocked the whole
            # event loop for 3s per poll; use the asyncio-aware sleep.
            await asyncio.sleep(3)
def _sample(self, method='naive'):
    """Sample probabilities of system.

    Parameters
    ----------
    method : {'naive', 'beta:x', 'kt'}, optional
        Sampling method to use. 'naive' is the standard histogram method.
        'beta:x' is for an add-constant beta estimator, with beta value
        following the colon eg 'beta:0.01' [1]_. 'kt' is for the
        Krichevsky-Trofimov estimator [2]_, which is equivalent to
        'beta:0.5'.

    References
    ----------
    .. [1] T. Schurmann and P. Grassberger, "Entropy estimation of symbol
       sequences," Chaos,vol. 6, no. 3, pp. 414--427, 1996.
    .. [2] R. Krichevsky and V. Trofimov, "The performance of universal
       encoding," IEEE Trans. Information Theory, vol. 27, no. 2,
       pp. 199--207, Mar. 1981.
    """
    calc = self.calc
    # decimalise: combine the X_n variables into a single code word per
    # trial (presumably a base-X_m -> decimal mapping; confirm in decimalise)
    if any([c in calc for c in ['HXY', 'HX']]):
        if self.X_n > 1:
            d_X = decimalise(self.X, self.X_n, self.X_m)
        else:
            # make 1D
            d_X = self.X.reshape(self.X.size)
    # unconditional probabilities
    if ('HX' in calc) or ('ChiX' in calc):
        # P(X) over the X_dim possible full-response words
        self.PX = prob(d_X, self.X_dim, method=method)
    if any([c in calc for c in ['HXY', 'HiX', 'HiXY', 'HY']]):
        # P(Y) estimated from the per-output trial counts Ny
        self.PY = _probcount(self.Ny, self.N, method)
    if 'SiHXi' in calc:
        # marginal P(Xi) for each individual variable
        for i in xrange(self.X_n):
            self.PXi[:, i] = prob(self.X[i, :], self.X_m, method=method)
    # conditional probabilities -- the slicing below shows trials are laid
    # out grouped by output value, Ny[i] consecutive trials per output i
    if any([c in calc for c in ['HiXY', 'HXY', 'HshXY']]):
        sstart = 0
        for i in xrange(self.Y_dim):
            send = sstart + self.Ny[i]
            indx = slice(sstart, send)
            sstart = send
            if 'HXY' in calc:
                # output conditional ensemble
                oce = d_X[indx]
                if oce.size == 0:
                    print 'Warning: Null output conditional ensemble for ' + \
                          'output : ' + str(i)
                else:
                    # P(X|Y=i)
                    self.PXY[:, i] = prob(oce, self.X_dim, method=method)
            if any([c in calc for c in ['HiX', 'HiXY', 'HshXY']]):
                for j in xrange(self.X_n):
                    # output conditional ensemble for a single variable
                    oce = self.X[j, indx]
                    if oce.size == 0:
                        print 'Warning: Null independent output conditional ensemble for ' + \
                              'output : ' + str(i) + ', variable : ' + str(j)
                    else:
                        # P(Xj|Y=i)
                        self.PXiY[:, j, i] = prob(oce, self.X_m, method=method)
                        if 'HshXY' in calc:
                            # shuffle
                            #np.random.shuffle(oce)
                            # permutation (not in-place shuffle) keeps oce intact
                            shfoce = np.random.permutation(oce)
                            self.Xsh[j, indx] = shfoce
    # Pind(X) = <Pind(X|Y)>_y
    if ('HiX' in calc) or ('ChiX' in calc):
        # construct joint distribution
        words = dec2base(
            np.atleast_2d(np.r_[0:self.X_dim]).T, self.X_m, self.X_n)
        PiXY = np.zeros((self.X_dim, self.Y_dim))
        # product over variables of the single-variable conditionals
        PiXY = self.PXiY[words, np.r_[0:self.X_n]].prod(axis=1)
        # average over Y
        self.PiX = np.dot(PiXY, self.PY)
    # distributions are now ready for the entropy calculations
    self.sampled = True
else:
    size = 0

if size < limit:  # learn faster early on
    log.info('fast learning')
    learn()
    try:
        log.info('new db size: ' + str(bytesto(os.path.getsize(MAIN_DB), 'm')))
    except:
        pass
    countdown(5)

if size > limit:  # once we learn enough start submissions and replies
    log.info('database size is big enough')
    if prob(0.02):  # 2% chance we reply to someone
        reddit.random_reply()
    # NOTE(review): probability is 0.00 — this branch never fires; random
    # submissions are effectively disabled (old comment claimed 1%)
    if prob(0.00):
        log.info('making a submission')
        reddit.random_submission()
    if prob(0.10):  # 10% chance we'll learn more
        log.info('going to learn')
        learn()
    # Wait before commenting/posting again because of reddit rate limits
    # NOTE(review): old comment said "10 minutes" but the argument is 1 —
    # confirm countdown()'s units
    countdown(1)
log.info('end main loop')
def _sample(self, method='naive'):
    """Sample probabilities of system.

    Parameters
    ----------
    method : {'naive', 'beta:x', 'kt'}, optional
        Sampling method to use. 'naive' is the standard histogram method.
        'beta:x' is for an add-constant beta estimator, with beta value
        following the colon eg 'beta:0.01' [1]_. 'kt' is for the
        Krichevsky-Trofimov estimator [2]_, which is equivalent to
        'beta:0.5'.

    References
    ----------
    .. [1] T. Schurmann and P. Grassberger, "Entropy estimation of symbol
       sequences," Chaos,vol. 6, no. 3, pp. 414--427, 1996.
    .. [2] R. Krichevsky and V. Trofimov, "The performance of universal
       encoding," IEEE Trans. Information Theory, vol. 27, no. 2,
       pp. 199--207, Mar. 1981.
    """
    calc = self.calc
    # decimalise: collapse the X_n variables into one code word per trial
    if any([c in calc for c in ['HXY','HX']]):
        if self.X_n > 1:
            d_X = decimalise(self.X, self.X_n, self.X_m)
        else:
            # make 1D
            d_X = self.X.reshape(self.X.size)
    # unconditional probabilities
    if ('HX' in calc) or ('ChiX' in calc):
        # P(X) over the X_dim possible response words
        self.PX = prob(d_X, self.X_dim, method=method)
    if any([c in calc for c in ['HXY','HiX','HiXY','HY']]):
        # P(Y) from the per-output trial counts Ny
        self.PY = _probcount(self.Ny,self.N,method)
    if 'SiHXi' in calc:
        # marginal P(Xi) for each variable
        for i in xrange(self.X_n):
            self.PXi[:,i] = prob(self.X[i,:], self.X_m, method=method)
    # conditional probabilities -- trials are sliced in consecutive runs of
    # Ny[i] per output value i
    if any([c in calc for c in ['HiXY','HXY','HshXY']]):
        sstart=0
        for i in xrange(self.Y_dim):
            send = sstart+self.Ny[i]
            indx = slice(sstart,send)
            sstart = send
            if 'HXY' in calc:
                # output conditional ensemble
                oce = d_X[indx]
                if oce.size == 0:
                    print 'Warning: Null output conditional ensemble for ' + \
                          'output : ' + str(i)
                else:
                    # P(X|Y=i)
                    self.PXY[:,i] = prob(oce, self.X_dim, method=method)
            if any([c in calc for c in ['HiX','HiXY','HshXY']]):
                for j in xrange(self.X_n):
                    # output conditional ensemble for a single variable
                    oce = self.X[j,indx]
                    if oce.size == 0:
                        print 'Warning: Null independent output conditional ensemble for ' + \
                              'output : ' + str(i) + ', variable : ' + str(j)
                    else:
                        # P(Xj|Y=i)
                        self.PXiY[:,j,i] = prob(oce, self.X_m, method=method)
                        if 'HshXY' in calc:
                            # shuffle
                            #np.random.shuffle(oce)
                            shfoce = np.random.permutation(oce)
                            self.Xsh[j,indx] = shfoce
    # Pind(X) = <Pind(X|Y)>_y
    if ('HiX' in calc) or ('ChiX' in calc):
        # construct joint distribution
        words = dec2base(np.atleast_2d(np.r_[0:self.X_dim]).T,self.X_m,self.X_n)
        PiXY = np.zeros((self.X_dim, self.Y_dim))
        # product over variables of the single-variable conditionals
        PiXY = self.PXiY[words,np.r_[0:self.X_n]].prod(axis=1)
        # average over Y
        self.PiX = np.dot(PiXY,self.PY)
    # distributions are now ready for entropy calculation
    self.sampled = True
def random_reply():
    """Reply to a random hot submission (or one of its comments).

    Uses the brain database of the submission's subreddit, falling back to
    the main brain db when the subreddit brain has not learned enough yet.
    APIExceptions propagate to the caller (rate-limit handling); everything
    else is logged and swallowed.
    """
    log.info("making random reply")
    # Choose a random submission from /r/all that is currently hot
    if SUBREDDIT_LIST:
        chosen_sub = random.choice(SUBREDDIT_LIST)
        submission = random.choice(list(api.subreddit(chosen_sub).hot()))
    else:
        submission = random.choice(list(api.subreddit("all").hot()))
    # Replace the "MoreReplies" stubs with the real comment objects
    submission.comments.replace_more(limit=0)
    sub_name = submission.subreddit.display_name
    brain = "{}/{}.db".format(DB_DIR, sub_name)
    log.info(brain)
    if not glob.glob(brain):
        learn(sub_name)
    reply_brain = bot.Brain(brain)

    def _main_brain():
        # The subreddit brain couldn't answer; switch to the general db.
        log.info(
            "I don't know enough from {}, using main brain db to reply"
            .format(sub_name))
        return bot.Brain("{}/{}.db".format(DB_DIR, "brain"))

    try:
        # (disabled) small chance we advertise via share() — kept off
        if prob(.35):
            # Larger chance that we reply to a comment than a submission.
            log.info("replying to a comment")
            comment = random.choice(submission.comments.list())
            response = reply_brain.reply(comment.body)
            # We might not be able to learn enough from the subreddit to
            # reply; if so, pull a reply from the general database.
            if "I don't know enough to answer you yet!" in response:
                reply_brain = _main_brain()
                response = reply_brain.reply(comment.body)
            comment.reply(response)
            log.info("Replied to comment: {}".format(comment.body))
            log.info("Replied with: {}".format(response))
        else:
            log.info("replying to a submission")
            # Pass the submission title to the chat brain asking for a reply
            response = reply_brain.reply(submission.title)
            # Same fallback as above.
            if "I don't know enough to answer you yet!" in response:
                reply_brain = _main_brain()
                response = reply_brain.reply(submission.title)
            submission.reply(response)
            log.info("Replied to Title: {}".format(submission.title))
            log.info("Replied with: {}".format(response))
    except praw.exceptions.APIException as e:
        raise e
    except Exception as e:
        log.error(e, exc_info=False)
def init():
    """Main bot loop.

    Learns quickly until the database reaches MAIN_DB_MIN_SIZE, then runs
    each configured reddit action with its own probability, tracking
    per-action rate-limit unlock times.  Never returns.
    """
    log.info("db size size to start replying:" +
             str(bytesto(MAIN_DB_MIN_SIZE, "m")))
    reddit.shadow_check()
    # check if this is the first time running the bot
    set_user_info()
    check_first_run()
    set_db_size()
    while True:
        if get_db_size() < MAIN_DB_MIN_SIZE and not COMMENTS_DISABLED:
            # learn faster early on
            log.info("""
    THE BOT IS WORKING. IT WILL TAKE ABOUT 8 HOURS FOR IT TO LEARN AND START COMMENTING.
    """)
            log.info("fast learning")
            learn()
            try:
                log.info("new db size: " + str(bytesto(get_db_size(), "m")))
            except Exception:
                # best-effort logging only; narrowed from a bare except
                pass
            set_db_size()
            countdown(2)
        if (get_db_size() > MAIN_DB_MIN_SIZE or COMMENTS_DISABLED
            ):  # once we learn enough start submissions and replies
            log.info("database size is big enough")
            if USE_SLEEP_SCHEDULE:
                while should_we_sleep():
                    log.info("zzzzzzzz :snore:")
                    time.sleep(60)
            # enumerate so updated namedtuples can be written back
            for idx, action in enumerate(reddit_bot):
                if action.rate_limit_unlock_epoch != 0:
                    if action.rate_limit_unlock_epoch > get_current_epoch():
                        log.info(
                            "{} hit RateLimit recently we need to wait {} seconds with this"
                            .format(
                                action.name,
                                action.rate_limit_unlock_epoch -
                                get_current_epoch(),
                            ))
                        continue
                    else:
                        # BUG FIX: namedtuple._replace returns a NEW tuple;
                        # the original discarded it, so the unlock never stuck.
                        reddit_bot[idx] = action._replace(
                            rate_limit_unlock_epoch=0)
                else:
                    if prob(action.probability):
                        log.info("making a random {}".format(action.name))
                        try:
                            action.action()
                        except praw.exceptions.APIException as e:
                            secs_to_wait = get_seconds_to_wait(str(e))
                            # BUG FIX: store the updated tuple back (see above)
                            reddit_bot[idx] = action._replace(
                                rate_limit_unlock_epoch=(get_current_epoch() +
                                                         secs_to_wait))
                            log.info(
                                "{} hit RateLimit, need to sleep for {} seconds"
                                .format(action.name, secs_to_wait))
                        except Exception as e:
                            log.error(
                                "something weird happened, {}".format(e),
                                exc_info=True)
            if prob(PROBABILITIES["LEARN"]):  # chance we'll learn more
                log.info("going to learn")
                learn()
            # Wait 10 minutes to comment and post because of reddit rate limits
            countdown(1)
        log.info("end main loop")
# Run each configured reddit action, honoring per-action rate-limit locks.
for action in reddit_bot:
    if action.rate_limit_unlock_epoch != 0:
        # Still locked out from a previous RateLimit hit?
        if action.rate_limit_unlock_epoch > get_current_epoch():
            log.info(
                "{} hit RateLimit recently we need to wait {} seconds with this"
                .format(
                    action.name,
                    action.rate_limit_unlock_epoch - get_current_epoch(),
                ))
            continue
        else:
            # NOTE(review): namedtuple._replace returns a NEW tuple and the
            # result is discarded here — the unlock likely never takes
            # effect. Confirm and store it back into reddit_bot.
            action._replace(rate_limit_unlock_epoch=0)
    else:
        if prob(action.probability):
            log.info("making a random {}".format(action.name))
            try:
                action.action()
            except praw.exceptions.APIException as e:
                secs_to_wait = get_seconds_to_wait(str(e))
                # NOTE(review): same discarded _replace result as above.
                action._replace(
                    rate_limit_unlock_epoch=(get_current_epoch() +
                                             secs_to_wait))
                log.info(
                    "{} hit RateLimit, need to sleep for {} seconds"
                    .format(action.name, secs_to_wait))
            except Exception as e:
                log.error("something weird happened, {}".format(e),
                          exc_info=True)
if prob(PROBABILITIES["LEARN"]):  # chance we'll learn more
def infer(images):
    """Run the model on a batch of images and return the probability for
    label 0 (presumably the mitosis class — confirm against `prob`)."""
    target_label = 0
    batch = Variable(images.cuda())
    logits = model(batch)
    return prob(logits, label=target_label)
if size < limit:  # learn faster early on
    log.info('fast learning')
    learn()
    try:
        log.info('new db size: ' + str(bytesto(os.path.getsize(MAIN_DB), 'm')))
    except:
        pass
    countdown(5)

if size > limit:  # once we learn enough start submissions and replies
    log.info('database size is big enough')
    if prob(0.02):  # 2% chance we reply to someone
        log.info('making a random reply')
        reddit.random_reply()
    if prob(0.02):  # 2% chance we make a random submission (old comment said 1%)
        log.info('making a submission')
        reddit.random_submission()
    if prob(0.01):  # 1% chance we'll learn more
        log.info('going to learn')
        learn()
    if prob(
            0.02
    ):  # 2% chance we'll delete previous comments with negative upvote (old comment said 5%)
        log.info('going to clean up "bad" comments')