def main():
    log = common.setup_logger("tweet_articles")

    # Log into the Twitter account
    log.info("Logging into Twitter account")
    auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
    auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
    twitter_api = tweepy.API(auth)

    # Get the list of articles to tweet (if any)
    if not os.path.exists("HelplineStockTweetList.txt"):
        log.error("HelplineStockTweetList.txt is missing!")
        return
    try:
        infile = codecs.open("HelplineStockTweetList.txt", "r", "utf8")
    except:
        infile = open("HelplineStockTweetList.txt", "r")
    tweets = []
    for line in infile:
        tweet = line.rstrip()
        tweets.append(tweet)
    if not tweets:
        log.info("No tweets to send")
        return

    # Tweet the articles
    log.info("Sending %i new tweets" % len(tweets))
    tweet = tweets.pop(0)
    send_tweet(log, twitter_api, "Sending first tweet - Tweet contents:", tweet)
    if not tweets:
        # Only one tweet in the file; avoid dividing by zero below
        return
    counter = 1
    wait_time = TWEET_SHIFT * 3600 / len(tweets)
    for tweet in tweets:
        log.info("Waiting %i minutes before sending next tweet" % (wait_time / 60))
        time.sleep(wait_time)
        send_tweet(log, twitter_api, "Sending tweet %i - Tweet contents:" % counter, tweet)
        counter += 1
def __init__(self, host="", port=143, username="", password="",
             ssl=False, folder="INBOX"):
    self.__host = host
    self.__port = port
    self.__username = username
    self.__password = password
    self.__ssl = ssl
    self.__logged_in = False
    self.__log = common.setup_logger("Mailbox")
    if self.__login():
        self.__logged_in = True
        self.switchFolder(folder)
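# Hypothetical usage sketch for the Mailbox class above. The constructor
# signature comes from the source; loggedIn() is referenced elsewhere in
# this codebase, and the host/credentials below are placeholders.
mailbox = Mailbox(host="imap.example.com", port=993,
                  username="user@example.com", password="app-password",
                  ssl=True, folder="INBOX")
if mailbox.loggedIn():
    print("Connected; INBOX selected")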
def main():
    log = common.setup_logger("tweet_articles")

    # Log into the Twitter account
    log.info("Logging into Twitter account")
    auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
    auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
    twitter_api = tweepy.API(auth)

    # Get the list of articles to tweet (if any)
    if not os.path.exists("new_tweets.txt"):
        log.error("new_tweets.txt is missing. Please run build_new_tweets.py script first.")
        return
    try:
        infile = codecs.open("new_tweets.txt", "r", "utf8")
    except:
        infile = open("new_tweets.txt", "r")
    articles = []
    for line in infile:
        article = line.rstrip()
        articles.append(article)

    # Build the list of tweets, interleaving one stock tweet after every
    # `interval` articles (guard against an empty STOCK_TWEETS list)
    interval = len(articles) // max(len(STOCK_TWEETS), 1)
    counter = 1
    tweets = []
    for article in articles:
        tweets.append(article)
        if counter == interval:
            if len(STOCK_TWEETS):
                tweets.append(STOCK_TWEETS.pop(0))
            counter = 1
        else:
            counter += 1
    tweets.append(TWEET_SIGN_OFF)

    # Tweet the articles
    send_tweet(log, twitter_api, "Sending Greeting - Tweet contents:", TWEET_GREETING)
    log.info("Sending %i new tweets" % len(tweets))
    counter = 1
    wait_time = TWEET_SHIFT * 3600 / len(tweets)
    for tweet in tweets:
        log.info("Waiting %i minutes before sending next tweet" % (wait_time / 60))
        time.sleep(wait_time)
        send_tweet(log, twitter_api, "Sending tweet %i - Tweet contents:" % counter, tweet)
        counter += 1
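# Standalone illustration of the interleaving logic above, with dummy
# data in place of the real article list and STOCK_TWEETS constant:
articles = ["a1", "a2", "a3", "a4", "a5", "a6"]
stock = ["s1", "s2"]
interval = len(articles) // max(len(stock), 1)  # -> 3

tweets, counter = [], 1
for article in articles:
    tweets.append(article)
    if counter == interval:
        if stock:
            tweets.append(stock.pop(0))
        counter = 1
    else:
        counter += 1

print(tweets)  # ['a1', 'a2', 'a3', 's1', 'a4', 'a5', 'a6', 's2']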
# Raspberry Pi Home Security System - master_controller
__author__ = "Caleb Madrigal"

import time
import json
import zmq
import logging
import datetime
import smtplib
from smtplib import SMTP_SSL
import settings
import gpio_helper
from common import setup_logger

logger = setup_logger("master", settings.master_log_file, logging.DEBUG)

################################################################################
# State file
################################################################################


def build_init_state():
    state = {'automation_mode': 'off', 'switches': {}}
    for switch_id in settings.switches:
        state['switches'][switch_id] = 'off'
    return state


def save_state(state):
    with open(settings.state_file, 'w') as f:
        f.write(json.dumps(state))
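# save_state() implies a matching loader. A minimal sketch (an assumption,
# not part of the original module): fall back to a fresh initial state
# when the state file is missing or unreadable.
def load_state():
    try:
        with open(settings.state_file, 'r') as f:
            return json.loads(f.read())
    except (IOError, ValueError):
        return build_init_state()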
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from yapsy.PluginManager import PluginManagerSingleton
import sys
import common
import plugins
import logging

loaded_plugins = {}
log = logging.getLogger('cacus.loader')
yapsy_log = common.setup_logger('yapsy')


def load_plugins():
    manager = PluginManagerSingleton.get()
    manager.setPluginPlaces(['plugins/'])
    manager.setCategoriesFilter({
        'storage': plugins.IStoragePlugin,
    })
    manager.locatePlugins()
    manager.collectPlugins()
    for category in ('storage',):
        try:
            cfg = common.config[category]
            for plugin in manager.getPluginsOfCategory(category):
                log.info("Found plugin %s", plugin.name)
                if plugin.name == cfg['type']:
# Raspberry Pi Home Security System - gpio_helper
__author__ = "Caleb Madrigal"

import zmq
import time
import logging
import settings
from common import setup_logger

# Fall back to a mock GPIO module when not running on a Raspberry Pi
try:
    import RPi.GPIO as GPIO
except ImportError:
    import gpio_mock as GPIO

logger = setup_logger("gpio", settings.gpio_log_file, logging.DEBUG)


def pulse_pin(pin, pulse_time_in_secs=1.5):
    GPIO.output(pin, True)
    time.sleep(pulse_time_in_secs)
    GPIO.output(pin, False)


def setup_switch_gpio_pins():
    # Setup modes for GPIO pins
    GPIO.setmode(GPIO.BCM)
    logger.info("Switch on pins: {0}".format(settings.on_pins))
    logger.info("Switch off pins: {0}".format(settings.off_pins))
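    # Hypothetical continuation -- the original function is truncated here.
    # Each switch pin is presumably configured as an output before use;
    # settings.on_pins / settings.off_pins are assumed to be lists of BCM
    # pin numbers, matching the log lines above.
    for pin in list(settings.on_pins) + list(settings.off_pins):
        GPIO.setup(pin, GPIO.OUT)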
def main():
    new_articles = []
    log = common.setup_logger("build_tweets_from_alerts")

    # Log into the Bitly account
    bitly_api = bitly.Api(login=BITLY_USERNAME, apikey=BITLY_API_KEY)

    # Connect to Gmail account
    gmail = Mailbox(EMAIL_SERVER_ADDRESS, EMAIL_SERVER_PORT, EMAIL_ADDRESS,
                    EMAIL_PASSWORD, EMAIL_USE_SSL, EMAIL_FOLDER)
    if not gmail.loggedIn():
        log.error("Unable to login to Gmail account")
        return

    # Create a directory to store the alert emails (for future use)
    if not os.path.exists("alert_emails"):
        os.mkdir("alert_emails")

    # Search the Gmail INBOX for Google Alert emails
    messages = gmail.getMessages('(FROM "*****@*****.**")')
    if len(messages) == 0:
        log.info("No Google Alert emails available")
        return

    # Store and parse each of the Google Alert emails
    for message in messages:
        gmail.markAsRead(message)

        # Download the email and store it
        contents = gmail.downloadSingleEmail(message)
        mail_obj = email.message_from_string(contents)
        struct_time = rfc822.parsedate(mail_obj.get("Date"))
        time_str = time.strftime("%y-%m-%d_%H.%M.%S", struct_time)
        email_name = "alert_email_%s.msg" % time_str
        log.info("Downloading email and storing to %s" % email_name)
        email_file = open(os.path.join("alert_emails", email_name), "w+")
        email_file.write(contents)
        email_file.close()

        # Parse the email and pull out the articles
        try:
            articles = parse_email(mail_obj)
        except:
            log.error("Error parsing Google Alert email %s" % email_name)
            continue
        log.info("Found %i articles in Google Alert email" % len(articles))
        new_articles += articles

    # Store the articles in the new_tweets.html file so that they can
    # be tweeted later
    outfile = codecs.open("new_tweets.html", "w", "utf8")
    outfile.write("<html>\n<head></head>\n<body>\n")
    log.info("Found %i total articles in Google Alert emails" % len(new_articles))
    counter = 0
    error_count = 0
    for article in new_articles:
        try:
            short_url = bitly_api.shorten(article["link"])
        except:
            log.error("Unable to shorten URL %s" % article["link"])
            error_count += 1
            continue
        tweet = build_tweet(article, short_url)
        counter += 1
        log.info("******************* Article %i *******************" % counter)
        log.info("Article Country: %s" % article["country"])
        log.info("Article Title: %s" % article["title"])
        log.info("Article Link: %s" % article["link"])
        log.info("Tweet: %s" % tweet)
        outfile.write("%s\n" % tweet)

        # There appears to be a limit to the number of URLs that can be
        # shortened per minute. Therefore we need to add a delay to prevent
        # us from hitting that limit.
        time.sleep(2.0)

    outfile.write("</body>\n</html>")
    outfile.close()
    log.info("****** SUMMARY ******")
    log.info("Articles that could be converted to tweets: %i" % counter)
    log.info("Articles that could not be converted to tweets: %i" % error_count)
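# build_tweet() is called above but not shown in this excerpt. A
# hypothetical sketch, assuming short_url is a plain string and tweets
# are capped at the 140 characters of the era:
def build_tweet(article, short_url, limit=140):
    room = limit - len(short_url) - 1
    title = article["title"]
    if len(title) > room:
        title = title[:room - 3] + "..."
    return "%s %s" % (title, short_url)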
from bottle import response
import json
import exceptions
import os
import requests
import logging as logger
import common
import database
import datetime
import responder_messages as messages
import log_messages
import slackapi

common.setup_logger()


def confirm_command_reception():
    """Immediate response to a request."""
    response.add_header("Content-Type", "application/json")
    response_content = {"text": messages.REQUEST_RECEIVED}
    return json.dumps(response_content, ensure_ascii=False).encode("utf-8")


def create_team_delayed_reply_missing_args(request):
    """Delayed response to Slack reporting not enough arguments on create team command."""
    response_content = {
        "text": messages.BAD_COMMAND_USAGE + messages.CREATE_TEAM_COMMAND_USAGE
    }
    send_delayed_response(request['response_url'], response_content)
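# send_delayed_response() is called above but not defined in this excerpt.
# Slack delayed replies are plain JSON POSTs to the response_url supplied
# with the original slash-command request, so a minimal sketch might be:
def send_delayed_response(response_url, content):
    reply = requests.post(response_url, json=content)
    if reply.status_code != 200:
        logger.error("Delayed response to %s failed with status %s",
                     response_url, reply.status_code)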
def run(config_file): print("--- Tiny ImageNet 200 Playground : Training --- ") print("Load config file ... ") config = load_config(config_file) seed = config.get("SEED", 2018) random.seed(seed) torch.manual_seed(seed) output = config["OUTPUT_PATH"] model = config["MODEL"] model_name = model.__class__.__name__ debug = config.get("DEBUG", False) from datetime import datetime now = datetime.now() log_dir = os.path.join( output, "training_{}_{}".format(model_name, now.strftime("%Y%m%d_%H%M"))) if not os.path.exists(log_dir): os.makedirs(log_dir) log_level = logging.INFO if debug: log_level = logging.DEBUG print("Activated debug mode") logger = logging.getLogger("Tiny ImageNet 200: Train") setup_logger(logger, os.path.join(log_dir, "train.log"), log_level) logger.debug("Setup tensorboard writer") writer = SummaryWriter(log_dir=os.path.join(log_dir, "tensorboard")) save_conf(config_file, log_dir, logger, writer) device = 'cpu' if torch.cuda.is_available(): logger.debug("CUDA is enabled") from torch.backends import cudnn cudnn.benchmark = True device = 'cuda' model = model.to(device) logger.debug("Setup train/val dataloaders") dataset_path = config["DATASET_PATH"] train_data_transform = config["TRAIN_TRANSFORMS"] val_data_transform = config["VAL_TRANSFORMS"] train_batch_size = config.get("BATCH_SIZE", 64) val_batch_size = config.get("VAL_BATCH_SIZE", train_batch_size) num_workers = config.get("NUM_WORKERS", 8) trainval_split = config.get("TRAINVAL_SPLIT", { 'fold_index': 0, 'n_splits': 7 }) train_loader, val_loader = get_trainval_data_loaders(dataset_path, train_data_transform, val_data_transform, train_batch_size, val_batch_size, trainval_split, num_workers, device=device) indices = np.arange(len(train_loader.dataset)) np.random.shuffle(indices) indices = indices[:len(val_loader.dataset)] if len( val_loader.dataset) < len(train_loader.dataset) else indices train_eval_loader = get_train_eval_data_loader(train_loader, indices) write_model_graph(writer, model=model, data_loader=train_loader, device=device) optimizer = config["OPTIM"] logger.debug("Setup criterion") criterion = nn.CrossEntropyLoss() if 'cuda' in device: criterion = criterion.to(device) lr_schedulers = config.get("LR_SCHEDULERS") logger.debug("Setup ignite trainer and evaluator") trainer = create_supervised_trainer(model, optimizer, criterion, device=device) metrics = { 'accuracy': CategoricalAccuracy(), 'precision': Precision(), 'recall': Recall(), 'nll': Loss(criterion) } train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) val_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) logger.debug("Setup handlers") log_interval = config.get("LOG_INTERVAL", 100) reduce_on_plateau = config.get("REDUCE_LR_ON_PLATEAU") # Setup timer to measure training time timer = Timer(average=True) timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED, pause=Events.ITERATION_COMPLETED) @trainer.on(Events.ITERATION_COMPLETED) def log_training_loss(engine): iter = (engine.state.iteration - 1) % len(train_loader) + 1 if iter % log_interval == 0: logger.info("Epoch[{}] Iteration[{}/{}] Loss: {:.4f}".format( engine.state.epoch, iter, len(train_loader), engine.state.output)) writer.add_scalar("training/loss_vs_iterations", engine.state.output, engine.state.iteration) @trainer.on(Events.EPOCH_STARTED) def update_lr_schedulers(engine): if lr_schedulers is not None: for lr_scheduler in lr_schedulers: lr_scheduler.step() @trainer.on(Events.EPOCH_STARTED) def log_lrs(engine): if 
len(optimizer.param_groups) == 1: lr = float(optimizer.param_groups[0]['lr']) writer.add_scalar("learning_rate", lr, engine.state.epoch) logger.debug("Learning rate: {}".format(lr)) else: for i, param_group in enumerate(optimizer.param_groups): lr = float(param_group['lr']) logger.debug("Learning rate (group {}): {}".format(i, lr)) writer.add_scalar("learning_rate_group_{}".format(i), lr, engine.state.epoch) log_images_dir = os.path.join(log_dir, "figures") os.makedirs(log_images_dir) def log_precision_recall_results(metrics, epoch, mode): for metric_name in ['precision', 'recall']: value = metrics[metric_name] avg_value = torch.mean(value).item() writer.add_scalar("{}/avg_{}".format(mode, metric_name), avg_value, epoch) # Save metric per class figure sorted_values = value.to('cpu').numpy() indices = np.argsort(sorted_values) sorted_values = sorted_values[indices] n_classes = len(sorted_values) classes = np.array( ["class_{}".format(i) for i in range(n_classes)]) sorted_classes = classes[indices] fig = create_fig_param_per_class(sorted_values, metric_name, classes=sorted_classes, n_classes_per_fig=20) fname = os.path.join( log_images_dir, "{}_{}_{}_per_class.png".format(mode, epoch, metric_name)) fig.savefig(fname) # Add figure in TB img = Image.open(fname) tag = "{}_{}".format(mode, metric_name) writer.add_image(tag, np.asarray(img), epoch) @trainer.on(Events.EPOCH_COMPLETED) def log_training_metrics(engine): epoch = engine.state.epoch logger.info("One epoch training time (seconds): {}".format( timer.value())) metrics = train_evaluator.run(train_eval_loader).metrics logger.info( "Training Results - Epoch: {} Avg accuracy: {:.4f} Avg loss: {:.4f}" .format(engine.state.epoch, metrics['accuracy'], metrics['nll'])) writer.add_scalar("training/avg_accuracy", metrics['accuracy'], epoch) writer.add_scalar("training/avg_error", 1.0 - metrics['accuracy'], epoch) writer.add_scalar("training/avg_loss", metrics['nll'], epoch) log_precision_recall_results(metrics, epoch, "training") @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(engine): epoch = engine.state.epoch metrics = val_evaluator.run(val_loader).metrics writer.add_scalar("validation/avg_loss", metrics['nll'], epoch) writer.add_scalar("validation/avg_accuracy", metrics['accuracy'], epoch) writer.add_scalar("validation/avg_error", 1.0 - metrics['accuracy'], epoch) logger.info( "Validation Results - Epoch: {} Avg accuracy: {:.4f} Avg loss: {:.4f}" .format(engine.state.epoch, metrics['accuracy'], metrics['nll'])) log_precision_recall_results(metrics, epoch, "validation") if reduce_on_plateau is not None: @val_evaluator.on(Events.COMPLETED) def update_reduce_on_plateau(engine): val_loss = engine.state.metrics['nll'] reduce_on_plateau.step(val_loss) def score_function(engine): val_loss = engine.state.metrics['nll'] # Objects with highest scores will be retained. 
return -val_loss # Setup early stopping: if "EARLY_STOPPING_KWARGS" in config: kwargs = config["EARLY_STOPPING_KWARGS"] if 'score_function' not in kwargs: kwargs['score_function'] = score_function handler = EarlyStopping(trainer=trainer, **kwargs) setup_logger(handler._logger, os.path.join(log_dir, "train.log"), log_level) val_evaluator.add_event_handler(Events.COMPLETED, handler) # Setup model checkpoint: best_model_saver = ModelCheckpoint(log_dir, filename_prefix="model", score_name="val_loss", score_function=score_function, n_saved=5, atomic=True, create_dir=True) val_evaluator.add_event_handler(Events.COMPLETED, best_model_saver, {model_name: model}) last_model_saver = ModelCheckpoint(log_dir, filename_prefix="checkpoint", save_interval=1, n_saved=1, atomic=True, create_dir=True) trainer.add_event_handler(Events.EPOCH_COMPLETED, last_model_saver, {model_name: model}) n_epochs = config["N_EPOCHS"] logger.info("Start training: {} epochs".format(n_epochs)) try: trainer.run(train_loader, max_epochs=n_epochs) except KeyboardInterrupt: logger.info("Catched KeyboardInterrupt -> exit") except Exception as e: # noqa logger.exception("") if debug: try: # open an ipython shell if possible import IPython IPython.embed() # noqa except ImportError: print("Failed to start IPython console") logger.debug("Training is ended") writer.close()
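# Hypothetical minimal config for run() above; every value is a placeholder
# illustrating the keys the function reads, not a tuned setup. (How
# load_config interprets the file -- a module of uppercase names or a
# dict -- is not shown in this excerpt.)
from torchvision import models, transforms
import torch.optim as optim

SEED = 2018
OUTPUT_PATH = "output"
DATASET_PATH = "data/tiny-imagenet-200"
MODEL = models.resnet18(num_classes=200)
OPTIM = optim.SGD(MODEL.parameters(), lr=0.1, momentum=0.9)
TRAIN_TRANSFORMS = transforms.Compose([transforms.ToTensor()])
VAL_TRANSFORMS = transforms.Compose([transforms.ToTensor()])
BATCH_SIZE = 64
N_EPOCHS = 50
DEBUG = False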
                if not len(frame_list):
                    continue
                image = np.hstack(frame_list)
            elif self.mode == 'production':
                image = self.frame_dict.get(self.main_stream)
                if image is None:
                    continue

            cv2.namedWindow('image', cv2.WINDOW_NORMAL)
            cv2.imshow("image", image)

            # Read the key once per loop; calling cv2.waitKey repeatedly
            # would block again for each check and can miss key presses
            key = cv2.waitKey(1) & 0xFF
            if key == ord('p'):
                self.mode = 'production'
            elif key == ord('d'):
                self.mode = 'dev'
            elif key == ord('q'):
                break

    def __del__(self):
        cv2.destroyAllWindows()
        # safely close client
        # self.clients[1].close()
        # clients[1].close()


if __name__ == '__main__':
    logger = setup_logger()
    cam = LogitechLive().start()
    cam.display()
def run(config_file): print("--- iMaterialist 2018 : Inference --- ") print("Load config file ... ") config = load_config(config_file) seed = config.get("SEED", 2018) random.seed(seed) torch.manual_seed(seed) output = Path(config["OUTPUT_PATH"]) debug = config.get("DEBUG", False) from datetime import datetime now = datetime.now() # log_dir = output / "inference_{}_{}".format(model_name, now.strftime("%Y%m%d_%H%M")) log_dir = output / ("{}".format(Path(config_file).stem)) / "{}".format( now.strftime("%Y%m%d_%H%M")) assert not log_dir.exists(), \ "Output logging directory '{}' already existing".format(log_dir) log_dir.mkdir(parents=True) shutil.copyfile(config_file, (log_dir / Path(config_file).name).as_posix()) log_level = logging.INFO if debug: log_level = logging.DEBUG print("Activated debug mode") logger = logging.getLogger("iMaterialist 2018: Inference") setup_logger(logger, (log_dir / "predict.log").as_posix(), log_level) logger.debug("Setup tensorboard writer") writer = SummaryWriter(log_dir=(log_dir / "tensorboard").as_posix()) save_conf(config_file, log_dir.as_posix(), logger, writer) model = config["MODEL"] device = config.get("DEVICE", 'cuda') if 'cuda' in device: assert torch.cuda.is_available(), \ "Device {} is not compatible with torch.cuda.is_available()".format(device) from torch.backends import cudnn cudnn.benchmark = True logger.debug("CUDA is enabled") model = model.to(device) logger.debug("Setup test dataloader") test_loader = config["TEST_LOADER"] logger.debug("Setup ignite inferencer") inferencer = create_inferencer(model, device=device) n_tta = config["N_TTA"] n_classes = config["N_CLASSES"] batch_size = test_loader.batch_size logger.debug("Setup handlers") # Setup timer to measure evaluation time timer = Timer(average=True) timer.attach(inferencer, start=Events.STARTED, resume=Events.ITERATION_STARTED, pause=Events.ITERATION_COMPLETED) n_samples = len(test_loader.dataset) indices = np.zeros((n_samples, ), dtype=np.int) y_probas_tta = np.zeros((n_samples, n_classes, n_tta)) @inferencer.on(Events.EPOCH_COMPLETED) def log_tta(engine): logger.debug("TTA {} / {}".format(engine.state.epoch, n_tta)) @inferencer.on(Events.ITERATION_COMPLETED) def save_results(engine): output = engine.state.output tta_index = engine.state.epoch - 1 start_index = ( (engine.state.iteration - 1) % len(test_loader)) * batch_size end_index = min(start_index + batch_size, n_samples) batch_y_probas = output['y_pred'].detach().numpy() y_probas_tta[start_index:end_index, :, tta_index] = batch_y_probas if tta_index == 0: indices[start_index:end_index] = output['indices'] logger.info("Start inference") try: inferencer.run(test_loader, max_epochs=n_tta) except KeyboardInterrupt: logger.info("Catched KeyboardInterrupt -> exit") return except Exception as e: # noqa logger.exception("") if debug: try: # open an ipython shell if possible import IPython IPython.embed() # noqa except ImportError: print("Failed to start IPython console") return # Average probabilities: y_probas = np.mean(y_probas_tta, axis=-1) if config["SAVE_PROBAS"]: logger.info("Write probabilities file") probas_filepath = log_dir / "probas.csv" write_probas(indices, y_probas, probas_filepath) else: y_preds = np.argmax(y_probas, axis=-1) + 1 # as labels are one-based logger.info("Write submission file") submission_filepath = log_dir / "predictions.csv" sample_submission_path = config["SAMPLE_SUBMISSION_PATH"] write_submission(indices, y_preds, sample_submission_path, submission_filepath)
def main():
    parser = argparse.ArgumentParser(
        description="Initialize a build directory for completing a tutorial. "
        "Invoke from an empty sub directory, or the tutorials directory, in "
        "which case a new build directory will be created for you.")
    parser.add_argument('--verbose',
                        action='store_true',
                        help="Output everything including debug info",
                        default=False)
    parser.add_argument('--quiet',
                        action='store_true',
                        help="Suppress output except for tutorial completion information",
                        default=True)
    parser.add_argument('--plat', type=str, choices=common.ALL_CONFIGS)
    parser.add_argument('--tut', type=str, choices=common.ALL_TUTORIALS, required=True)
    parser.add_argument('--solution',
                        action='store_true',
                        help="Generate pre-made solutions",
                        default=False)
    parser.add_argument('--task', help="Generate pre-made solutions")
    parser.add_argument('tutedir', nargs='?', default=os.getcwd())
    args = parser.parse_args()
    common.setup_logger(__name__)
    common.set_log_level(args.verbose, True)

    # Additional config/tutorial combination validation
    if not args.plat:
        # just pick the first platform that works for this tutorial
        args.plat = list(common.TUTORIALS[args.tut])[0]
    if args.plat not in common.TUTORIALS[args.tut]:
        logging.error(
            "Tutorial %s not supported by platform %s. Valid platforms are %s:",
            args.tut, args.plat, common.TUTORIALS[args.tut])
        return -1

    # Check that the current working directory is empty. If not, create a
    # suitably named build directory and switch to it
    dir_contents = os.listdir(args.tutedir)
    initialised = False
    if ".tute_config" in dir_contents:
        initialised = True
    if len(dir_contents) != 0 and ".tute_config" not in dir_contents:
        # Check that we are in the tutorial root directory before we decide
        # to start making new directories
        if not os.access(os.getcwd() + "/init", os.X_OK):
            logging.error("Current dir %s is invalid" % os.getcwd())
            parser.print_help(sys.stderr)
            return -1
        tute_dir = os.path.join(os.getcwd(), args.tut)
        if os.path.exists(tute_dir):
            tute_dir = tempfile.mkdtemp(dir=os.getcwd(), prefix=('%s' % (args.tut)))
        else:
            os.mkdir(tute_dir)
        os.chdir(tute_dir)
    else:
        tute_dir = args.tutedir
        # Check that our parent directory is an expected tutorial root directory
        if not os.access(os.getcwd() + "/../init", os.X_OK):
            logging.error("Parent directory is not tutorials root directory")
            return -1

    # Initialize cmake. Output will be suppressed as it defaults to the background
    build_dir = "%s_build" % tute_dir
    if not initialised:
        os.mkdir(build_dir)
    result = common.init_directories(args.plat, args.tut, args.solution,
                                     args.task, initialised, tute_dir,
                                     build_dir, sys.stdout)
    if result.exit_code != 0:
        logging.error("Failed to initialize build directory.")
        return -1

    # Inform the user about any subdirectory we might have made
    print("Tutorials created in subdirectory \"%s\"." % os.path.basename(tute_dir))
    print("Build directory initialised in \"%s\"." % os.path.basename(build_dir))
    return 0