async def on_message(self, message):
    """Screen image attachments through the Azure adult-content filter.

    Ignores the bot's own messages. When the "azurefilter" config flag is
    enabled and the message has at least one attachment, the first
    attachment's URL is rated by Azure. A "safe" verdict gets an OK
    reaction; an "adult" verdict outside NSFW channels deletes the message;
    anything else is treated as a failed rating and flagged.
    """
    if message.author == self.bot.user:
        return
    if self.bot.conf["azurefilter"] == True and len(message.attachments) > 0:
        # Renamed from "json" so the stdlib json module is not shadowed.
        payload = {'url': '{0}'.format(message.attachments[0].url)}
        logger.info("Azure request: " + self.bot.conf["azure_url_ext"] + "\n"
                    + str(payload) + "\n" + str(self.bot.conf["azure_key"]))
        tuomio = self.azure_request(self.bot.conf["azure_url_ext"], payload,
                                    self.bot.conf["azure_key"])
        if tuomio is False:
            # Image judged safe.
            await message.add_reaction(self.bot.conf["emoji_ok"])
        elif tuomio == True and message.channel.is_nsfw() is False:
            await message.delete()
            msg = 'Image was deleted due to high Adultscore\nPlease repost to NSFW'
            logger.info("NSFW image deleted")
            await message.channel.send(msg)
        else:
            # BUGFIX: tuomio may be a non-string status here; concatenating it
            # directly raised TypeError. Also fixed the "NFSW" typo.
            logger.warning("Failed to fetch NSFW rating, status: " + str(tuomio))
            await message.add_reaction(self.bot.conf["emoji_nok"])
def _date_from_heading(self, heading): try: match = re.search(r'\d{2}/\d{2}/\d{4}', heading).group(0) if match is not None: return match except: logger.warning(f'could not extract date from heading: \n{heading}') pass
def extract_color(self, colorname, boundaries):
    """Isolate the *colorname* pathway in the image.

    Pixels inside *boundaries* (HSV lower/upper pair) stay in colour, the
    rest of the picture is rendered as blurred grayscale, and the result is
    written to "<image>_<colorname>.jpg". Returns early (writing nothing)
    when too few pixels match the colour.
    """
    logger.info("Extracting the {} pathway".format(colorname))
    lower, upper = boundaries
    # create NumPy arrays from the boundaries
    lower = np.array(lower, dtype="uint8")
    upper = np.array(upper, dtype="uint8")
    # find the colors within the specified boundaries and apply the mask
    mask_color = cv2.inRange(self.img_hsv, lower, upper)
    if colorname == "red":
        # Red hue wraps around the ends of the HSV hue axis, so a second
        # range is merged in for red only.
        lower, upper = RED_UPPER_BOUNDARIES
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")
        mask_color_neg = cv2.inRange(self.img_hsv, lower, upper)
        mask_color = cv2.bitwise_or(mask_color, mask_color_neg)
    if self.debug:
        cv2.imshow("Color mask", mask_color)
        cv2.waitKey(0)
    mask = cv2.bitwise_and(mask_color, self.bkgrd_mask)
    # Arbitrary minimum number of pixels to validate the color.
    # BUGFIX: the mask holds 0/255 values, so summing intensities meant the
    # old `sum(mask.flatten()) < 1000` only required ~4 matching pixels;
    # count the non-zero pixels instead, as the message states.
    if np.count_nonzero(mask) < 1000:
        logger.warning("Less than 1000 pixel for {}.".format(colorname))
        return
    # Extract colored part
    colored_part = cv2.bitwise_and(src1=self.img_rgb, src2=self.img_rgb, mask=mask)
    # Add blur to the gray version
    gray_image_blur = cv2.GaussianBlur(src=self.img_gray, ksize=(11, 11), sigmaX=0)
    # Extract the gray part
    gray_part = cv2.bitwise_and(src1=gray_image_blur, src2=gray_image_blur,
                                mask=cv2.bitwise_not(mask))
    # Mix the gray part and the color part
    extracted_path = colored_part + gray_part
    # Save the extracted path (the ''.join wrapper around a single string
    # was a no-op and has been removed).
    filename = "{}_{}.jpg".format(args["image"].split(".")[0], colorname)
    # Side by side original picture and the extracted path
    if self.debug:
        cv2.imwrite(filename, np.hstack([self.img_rgb, extracted_path]))
    else:
        cv2.imwrite(filename, extracted_path)
async def on_guild_channel_create(self, channel):
    """Grant the matching role access when a club channel is created.

    Channels whose name matches the configured club prefix are paired with
    the guild role of the same name; that role is given send/view
    permissions on the new channel.
    """
    logger.info("Detected new channel created: " + channel.name)
    if bool(re.match(self.bot.conf["club_prefix"], channel.name)):
        role = discord.utils.get(channel.guild.roles, name=channel.name)
        # BUGFIX: role.name was previously logged BEFORE the None check,
        # raising AttributeError exactly when no matching role existed.
        if role is None:
            logger.warning("This probably shouldn't happen :E")
        else:
            logger.info("Found role " + role.name)
            logger.info("Set default permissions: role " + role.name
                        + " can send messages to channel " + channel.name)
            await channel.set_permissions(role, send_messages=True)
            await channel.set_permissions(role, view_channel=True)
def _readConf(self):
    """Load the [Controller] section of CONFIGFILE.

    Sets the logger level, beer/chamber temperature calibration offsets and
    the compressor on-delay, falling back to safe defaults (with a warning)
    whenever an entry is missing or malformed.
    """
    try:
        if not os.path.isfile(CONFIGFILE):
            logger.error("Controller configuration file is not valid: " + CONFIGFILE)
        ini = configparser.ConfigParser()
        ini.read(CONFIGFILE)
        if 'Controller' in ini:
            logger.debug("Reading Controller config")
            config = ini['Controller']
            # Logging verbosity; anything unrecognised or missing -> INFO.
            try:
                if config["MessageLevel"] == "DEBUG":
                    logger.setLevel(logging.DEBUG)
                elif config["MessageLevel"] == "WARNING":
                    logger.setLevel(logging.WARNING)
                elif config["MessageLevel"] == "ERROR":
                    logger.setLevel(logging.ERROR)
                else:
                    logger.setLevel(logging.INFO)
            except KeyError:
                logger.setLevel(logging.INFO)
            # Calibration offset for the beer temperature probe.
            try:
                if config["BeerTempAdjust"] != "":
                    self.beerTAdjust = float(config.get("BeerTempAdjust"))
                else:
                    raise Exception
            except Exception:
                self.beerTAdjust = 0.0
                logger.warning("Invalid BeerTempAdjust in configuration; using default: " + str(self.beerTAdjust))
            # Calibration offset for the chamber temperature probe.
            try:
                if config["ChamberTempAdjust"] != "":
                    self.chamberTAdjust = float(config.get("ChamberTempAdjust"))
                else:
                    raise Exception
            except Exception:
                self.chamberTAdjust = 0.0
                # BUGFIX: this message previously said "BeerTempAdjust"
                # (copy-paste error), misleading anyone debugging the config.
                logger.warning("Invalid ChamberTempAdjust in configuration; using default: " + str(self.chamberTAdjust))
            # Compressor on-delay, configured in minutes, stored in seconds.
            try:
                if config["OnDelay"] != "" and int(config["OnDelay"]) >= 0:
                    self.onDelay = int(config.get("OnDelay")) * 60
                else:
                    raise Exception
            except Exception:
                self.onDelay = DEFAULT_ON_DELAY
                logger.warning("Invalid OnDelay in configuration; using default: " + str(self.onDelay) + " seconds")
    except Exception:
        # Was a bare except; Exception keeps KeyboardInterrupt/SystemExit alive.
        logger.warning("Problem read from configuration file: " + CONFIGFILE)
    logger.debug("Controller config updated")
def connect(self, HOST):
    """Build a MongoDB URI for *HOST* and return an open MongoClient.

    Returns None when no connection object was produced.
    """
    # NOTE(review): credentials are hard-coded here; consider moving them
    # into configuration or environment variables.
    try:
        from urllib.parse import quote_plus  # Python 3.x
    except ImportError:
        from urllib import quote_plus  # Python 2.x
    uri = "mongodb://%s:%s@%s" % (
        quote_plus('NetsmartAdmin'),
        quote_plus('Netsmart99'),
        HOST,
    )
    connection = MongoClient(uri, 27017)
    logger.info('Connection!')
    if connection is None:
        logger.warning('Unable to connect to mongo database: %s', uri)
        return
    logger.info('Connection Works!')
    return connection
def drop_backup_evolv_collection(collection):
    """Drop *collection* from the backup_hiera_evolv database."""
    logger.warning('drop_backup_evolv_collection called')
    cx = MongoDB(HOST)
    backup_evolv = cx.getDB('backup_hiera_evolv')
    # BUGFIX: ".drop" only referenced the method without calling it, so the
    # collection was never actually dropped.
    backup_evolv[collection].drop()
def _readConf(self):
    """Load this Tilt's config section (named after its colour) from CONFIGFILE.

    Reads UpdateIntervalSeconds, BluetoothDeviceId and MessageLevel.
    Missing or invalid entries keep defaults; when any default was used the
    effective values are printed with a "**" marker.
    """
    bDefaultInterval = True
    bDefaultBtDeviceId = True

    if not os.path.isfile(CONFIGFILE):
        logger.error("Tilt configuration file is not valid: " + CONFIGFILE)
    else:
        ini = configparser.ConfigParser()
        try:
            logger.debug("Reading Tilt config: " + CONFIGFILE)
            ini.read(CONFIGFILE)
            if self.color in ini:
                config = ini[self.color]

                # Polling interval; values below MINIMUM_INTERVAL are clamped.
                try:
                    if config["UpdateIntervalSeconds"] != "" and int(
                            config["UpdateIntervalSeconds"]) >= MINIMUM_INTERVAL:
                        self.interval = int(config.get("UpdateIntervalSeconds"))
                        bDefaultInterval = False
                    else:
                        self.interval = MINIMUM_INTERVAL
                except (KeyError, ValueError):
                    # BUGFIX: a non-numeric value raised ValueError, which the
                    # old KeyError-only handler let escape to the outer bare
                    # except, silently aborting the remaining settings.
                    pass

                # Bluetooth adapter id; negative or malformed -> 0.
                try:
                    if config["BluetoothDeviceId"] != "" and int(
                            config["BluetoothDeviceId"]) >= 0:
                        self.bluetoothDeviceId = int(config.get("BluetoothDeviceId"))
                        bDefaultBtDeviceId = False
                    else:
                        self.bluetoothDeviceId = 0
                except (KeyError, ValueError):
                    pass

                # Logging verbosity; unrecognised or missing -> INFO.
                try:
                    if config["MessageLevel"] == "DEBUG":
                        logger.setLevel(logging.DEBUG)
                    elif config["MessageLevel"] == "WARNING":
                        logger.setLevel(logging.WARNING)
                    elif config["MessageLevel"] == "ERROR":
                        logger.setLevel(logging.ERROR)
                    else:
                        logger.setLevel(logging.INFO)
                except KeyError:
                    logger.setLevel(logging.INFO)
            else:
                logger.error("[" + self.color + "] section not found in ini file: " + CONFIGFILE)
        except Exception:
            # Was a bare except; keep the best-effort behaviour but do not
            # swallow KeyboardInterrupt/SystemExit.
            pass

    if bDefaultInterval or bDefaultBtDeviceId:
        logger.warning("Problem read from configuration file: \"" + CONFIGFILE +
                       "\". Using some default values[**] for Tilt configuration. It could take a minute for updated values in config file to be used.")
        sConf = "Color = " + self.color
        sConf = sConf + "\nUpdateIntervalSeconds = " + str(self.interval)
        if bDefaultInterval:
            sConf = sConf + "**"
        sConf = sConf + "\nBluetoothDeviceId = " + str(self.bluetoothDeviceId)
        if bDefaultBtDeviceId:
            sConf = sConf + "**"
        print(sConf)
def _readConf(self):
    """Load the [Chamber] section of CONFIGFILE.

    Reads the logger level, the target temperature schedule (Temps/Dates)
    and the beer/chamber control buffers. Invalid schedules fall back to
    defaults that keep heating/cooling switched off.
    """
    try:
        if not os.path.isfile(CONFIGFILE):
            logger.error("Chamber configuration file is not valid: " + CONFIGFILE)
        ini = configparser.ConfigParser()
        ini.read(CONFIGFILE)
        if 'Chamber' in ini:
            logger.debug("Reading Chamber config")
            config = ini['Chamber']
            # Logging verbosity; unrecognised or missing -> INFO.
            try:
                if config["MessageLevel"] == "DEBUG":
                    logger.setLevel(logging.DEBUG)
                elif config["MessageLevel"] == "WARNING":
                    logger.setLevel(logging.WARNING)
                elif config["MessageLevel"] == "ERROR":
                    logger.setLevel(logging.ERROR)
                else:
                    logger.setLevel(logging.INFO)
            except KeyError:
                logger.setLevel(logging.INFO)
            # Read temperatures to target for each date
            try:
                if config["Temps"] != "":
                    self.targetTemps = [float(x) for x in config["Temps"].split(",")]
                else:
                    raise Exception
            except Exception:
                self.targetTemps = [DEFAULT_TEMP]
                logger.warning("Invalid temp values; using default: " + str(self.targetTemps[0]))
            # Read dates when temperature should change
            try:
                if config["Dates"] != "":
                    self.tempDates = [
                        datetime.datetime.strptime(x, '%d/%m/%Y %H:%M:%S')
                        for x in config["Dates"].split(",")
                    ]
                else:
                    raise Exception
            except Exception:
                self.tempDates = [datetime.datetime.now(), datetime.datetime.now()]
                logger.warning("Invalid date values; using default. Heating/cooling will NOT start")
            # Each temperature spans two dates, so there must be exactly one
            # more date than temperatures.
            if len(self.tempDates) != len(self.targetTemps) + 1:
                self.tempDates = [datetime.datetime.now(), datetime.datetime.now()]
                self.targetTemps = [DEFAULT_TEMP]
                logger.warning("Invalid date or time values; using default. Heating/cooling will NOT start")
            # Dead band around the beer target before heating/cooling engages.
            try:
                if config["BeerTemperatureBuffer"] != "" and float(
                        config["BeerTemperatureBuffer"]) >= 0.0:
                    self.bufferBeerTemp = float(config.get("BeerTemperatureBuffer"))
                else:
                    raise Exception
            except Exception:
                self.bufferBeerTemp = DEFAULT_BUFFER_BEER_TEMP
                logger.warning(
                    "Invalid beer temperature buffer in configuration; using default: "
                    + str(self.bufferBeerTemp))
            # Scale buffer applied to the chamber temperature.
            try:
                if config["ChamberScaleBuffer"] != "" and float(
                        config["ChamberScaleBuffer"]) >= 0.0:
                    self.bufferChamberScale = float(config.get("ChamberScaleBuffer"))
                else:
                    raise Exception
            except Exception:
                self.bufferChamberScale = DEFAULT_BUFFER_CHAMBER_SCALE
                logger.warning(
                    "Invalid chamber scale buffer in configuration; using default: "
                    + str(self.bufferChamberScale))
    except Exception:
        # Was a bare except; Exception keeps KeyboardInterrupt/SystemExit alive.
        logger.warning("Problem read from configuration file: " + CONFIGFILE)
    logger.debug("Chamber config updated")
def read_settings():
    """Read the [Fermonitor] section of CONFIGFILE into module globals.

    Sets sTiltColor, chamberControlTemp, bUseTilt and logLevel.

    Raises:
        IOError: when CONFIGFILE is missing/unreadable or has no
            [Fermonitor] section.
    """
    global sTiltColor
    global chamberControlTemp
    global bUseTilt
    global logLevel

    chamberControlTemp = CONTROL_WIRE
    bUseTilt = False
    logLevel = logging.INFO

    logger.debug("Reading configfile: " + CONFIGFILE)
    try:
        if not os.path.isfile(CONFIGFILE):
            raise Exception
        ini = configparser.ConfigParser()
        ini.read(CONFIGFILE)
    except Exception:
        raise IOError("Fermonitor configuration file is not valid: " + CONFIGFILE)

    try:
        config = ini['Fermonitor']
    except Exception:
        raise IOError("[Fermonitor] section not found in fermonitor.ini")

    # Logging verbosity. BUGFIX: the original parsed MessageLevel twice with
    # identical logic (duplicated block at the end); once is sufficient.
    try:
        if config["MessageLevel"] == "DEBUG":
            logLevel = logging.DEBUG
        elif config["MessageLevel"] == "WARNING":
            logLevel = logging.WARNING
        elif config["MessageLevel"] == "ERROR":
            logLevel = logging.ERROR
        else:
            logLevel = logging.INFO
    except KeyError:
        logLevel = logging.INFO
    logger.setLevel(logLevel)

    # Tilt colour; empty/missing means the Tilt is not used.
    try:
        if config["TiltColor"] != "":
            sTiltColor = config.get("TiltColor")
        else:
            raise Exception
    except Exception:
        logger.warning("No color specified for Tilt. Tilt not used.")
        sTiltColor = ""
    logger.debug("Tilt color: " + sTiltColor)

    # Which sensor drives chamber control: wired probe (WIRE) or Tilt (TILT).
    try:
        if config["ChamberControl"] != "":
            if config.get("ChamberControl") == "WIRE":
                chamberControlTemp = CONTROL_WIRE
                logger.debug("Chamber control temperature based on WIRE")
            elif config.get("ChamberControl") == "TILT":
                chamberControlTemp = CONTROL_TILT
                logger.debug("Chamber control temperature based on TILT")
            else:
                chamberControlTemp = CONTROL_WIRE
                logger.warning("Invalid ChamberControl configuration; using default: WIRE")
        else:
            chamberControlTemp = CONTROL_WIRE
            logger.warning("Invalid ChamberControl configuration; using default: WIRE")
    except Exception:
        chamberControlTemp = CONTROL_WIRE
        logger.warning("Invalid ChamberControl configuration; using default: WIRE")

    logger.debug("Completed reading settings")
    return
def _readConf(self):
    """Load the [BrewFather] section of CONFIGFILE.

    Reads the Update flag, UpdateURL, UpdateIntervalSeconds and Device
    name. A missing/empty Update or UpdateURL aborts parsing via the outer
    handler, which disables BrewFather updates until the config is fixed.
    """
    try:
        if not os.path.isfile(CONFIGFILE):
            logger.error("BrewFather configuration file is not valid: " + CONFIGFILE)
        ini = configparser.ConfigParser()
        ini.read(CONFIGFILE)
        if 'BrewFather' in ini:
            logger.debug("Reading BrewFather config")
            config = ini['BrewFather']
            # Logging verbosity; unrecognised or missing -> INFO.
            try:
                if config["MessageLevel"] == "DEBUG":
                    logger.setLevel(logging.DEBUG)
                elif config["MessageLevel"] == "WARNING":
                    logger.setLevel(logging.WARNING)
                elif config["MessageLevel"] == "ERROR":
                    logger.setLevel(logging.ERROR)
                else:
                    logger.setLevel(logging.INFO)
            except KeyError:
                logger.setLevel(logging.INFO)
            # Update and UpdateURL are mandatory; raising here is caught by
            # the outer handler, which disables updating.
            if config["Update"] != "":
                self.bUpdate = strtobool(config.get("Update"))
            else:
                # BUGFIX: was "raise Excpetion" (a NameError that only
                # worked by accident via the outer bare except).
                raise Exception
            if config["UpdateURL"] != "":
                self.sURL = config.get("UpdateURL")
            else:
                raise Exception
            # Update interval, clamped to the 15-minute minimum.
            try:
                if config["UpdateIntervalSeconds"] != "":
                    if int(config["UpdateIntervalSeconds"]) >= MINIMUM_INTERVAL:
                        self.interval = int(config.get("UpdateIntervalSeconds"))
                    else:
                        logger.warning("Brewfather update interval cannot be less than 15min; using 900s")
                        self.interval = MINIMUM_INTERVAL
                else:
                    raise Exception
            except Exception:
                logger.warning("Error reading Brewfather update interval; using 900s")
                self.interval = MINIMUM_INTERVAL
            # Device name reported to BrewFather.
            try:
                if config["Device"] != "":
                    self.postdata["name"] = config["Device"]
                else:
                    raise Exception
            except Exception:
                self.postdata["name"] = "Fermonitor"
    except Exception:
        self.bUpdate = False
        logger.warning(
            "Problem read from configuration file: " + CONFIGFILE +
            ". Updating BrewFather.app is disabled until configuration fixed. It could take a minute for updated values in config file to be used."
        )
    print("[BrewFather]\nUpdate = " + str(self.bUpdate) + "\nUpdateURL = " +
          self.sURL + "\nUpdateIntervalSeconds = " + str(self.interval))
    logger.debug("BrewFather config:\n[BrewFather]\nUpdate = " + str(self.bUpdate) +
                 "\nUpdateURL = " + self.sURL + "\nUpdateIntervalSeconds = " +
                 str(self.interval))
def run_financials(self):
    """Fetch balance-sheet, EPS and revenue history for self.symbol from
    the Yahoo Finance quoteSummary API and persist each section as a CSV
    under ticker_info/.

    Skips entirely (with a log line) when financials were already run for
    this instance.
    """
    if self.financials_are_run:
        logger.info(f"Financials skipping for {self.symbol}...")
    else:
        logger.info(f"Beginning Financials update for {self.symbol}")
        self.financials_are_run = True
        yahoo_api_base = "query1.finance.yahoo.com"

        # BALANCE SHEET SECTION
        query_url = f"http://{yahoo_api_base}/v10/finance/quoteSummary/{self.symbol}?modules=balanceSheetHistory"
        # Renamed from "json" so the stdlib json module is not shadowed.
        reply = requests.get(query_url).json()
        time.sleep(2)  # courtesy pause between API calls
        if reply["quoteSummary"]["error"]:
            logger.error(reply["quoteSummary"]["error"]["description"])
        else:
            # The below is a list where each element is one year
            annual_balance_sheet = reply["quoteSummary"]["result"][0][
                "balanceSheetHistory"]["balanceSheetStatements"]
            # Pull all of our parameters of interest into Python lists to
            # use as columns for a Pandas DataFrame
            balance_timestamps = []
            short_term_assets, total_assets = [], []
            short_term_liabilities, total_liabilities = [], []
            fields = [
                "totalCurrentAssets",
                "totalAssets",
                "totalCurrentLiabilities",
                "totalLiab",
            ]
            for year in annual_balance_sheet:
                # BUGFIX: the generator variable previously shadowed the
                # "fields" list itself; use a distinct name.
                if all(f in year for f in fields):
                    if (year["totalCurrentAssets"] and year["totalAssets"]
                            and year["totalCurrentLiabilities"]
                            and year["totalLiab"]):
                        balance_timestamps.append(year["endDate"]["fmt"])
                        short_term_assets.append(year["totalCurrentAssets"]["raw"])
                        total_assets.append(year["totalAssets"]["raw"])
                        short_term_liabilities.append(year["totalCurrentLiabilities"]["raw"])
                        total_liabilities.append(year["totalLiab"]["raw"])
            if balance_timestamps:
                self.balance_sheet = pd.DataFrame(
                    {
                        "ShortTermAssets": short_term_assets,
                        "TotalAssets": total_assets,
                        "ShortTermLiabilities": short_term_liabilities,
                        "TotalLiabilities": total_liabilities,
                    },
                    index=pd.to_datetime(balance_timestamps),
                )
                self.balance_sheet.index.rename("Timestamp", inplace=True)
                self.balance_sheet.to_csv(
                    f"ticker_info/balance_sheet/{self.symbol}_balance_sheet.csv")
            else:
                logger.warning(
                    f"Did not receive any Balance Sheet information for {self.symbol}")

        # EPS SECTION
        query_url = f"http://{yahoo_api_base}/v10/finance/quoteSummary/{self.symbol}?modules=earningsHistory"
        reply = requests.get(query_url).json()
        time.sleep(2)
        if reply["quoteSummary"]["error"]:
            logger.error(reply["quoteSummary"]["error"]["description"])
        else:
            # List of each quarter for the last year
            quarterly_eps = reply["quoteSummary"]["result"][0][
                "earningsHistory"]["history"]
            eps_timestamp, eps = [], []
            fields = ["quarter", "epsActual"]
            for quarter in quarterly_eps:
                if all(f in quarter for f in fields):
                    if quarter["quarter"] and quarter["epsActual"]:
                        eps_timestamp.append(quarter["quarter"]["fmt"])
                        eps.append(quarter["epsActual"]["raw"])
            if eps_timestamp:
                self.eps = pd.DataFrame({"EPS": eps},
                                        index=pd.to_datetime(eps_timestamp))
                self.eps.index.rename("Timestamp", inplace=True)
                self.eps.to_csv(f"ticker_info/eps/{self.symbol}_eps.csv")
            else:
                logger.warning(f"Did not receive any EPS data for {self.symbol}")

        # REVENUE SECTION
        query_url = f"http://{yahoo_api_base}/v10/finance/quoteSummary/{self.symbol}?modules=incomeStatementHistory"
        reply = requests.get(query_url).json()
        time.sleep(2)
        if reply["quoteSummary"]["error"]:
            logger.error(reply["quoteSummary"]["error"]["description"])
        else:
            # List of annual income statement each year for the last 4 years
            annual_income_statement = reply["quoteSummary"]["result"][0][
                "incomeStatementHistory"]["incomeStatementHistory"]
            revenue_timestamp = []
            total_revenue, gross_profit, net_income = [], [], []
            fields = ["totalRevenue", "grossProfit", "netIncome"]
            for year in annual_income_statement:
                if all(f in year for f in fields):
                    if (year["totalRevenue"] and year["grossProfit"]
                            and year["netIncome"]):
                        revenue_timestamp.append(year["endDate"]["fmt"])
                        total_revenue.append(year["totalRevenue"]["raw"])
                        gross_profit.append(year["grossProfit"]["raw"])
                        net_income.append(year["netIncome"]["raw"])
            if revenue_timestamp:
                self.income_statement = pd.DataFrame(
                    {
                        "TotalRevenue": total_revenue,
                        "GrossProfit": gross_profit,
                        "NetIncome": net_income,
                    },
                    index=pd.to_datetime(revenue_timestamp),
                )
                self.income_statement.index.rename("Timestamp", inplace=True)
                self.income_statement.to_csv(
                    f"ticker_info/revenue/{self.symbol}_revenue.csv")
            else:
                logger.warning(
                    f"Did not receive any revenue data for {self.symbol}")
def run_price_and_dividends(self, start_date_epoch, end_date_epoch):
    """Download daily adjusted-close prices and dividend events for
    self.symbol between the two epoch timestamps (seconds) and write them
    to CSVs under ticker_info/.

    On an API error or an empty reply the financials step is also marked
    done so the caller skips it.
    """
    if self.price_is_run:
        logger.info(f"Price already loaded for {self.symbol}, skipping...")
    else:
        logger.info(f"\nBeginning Price and Dividend update for {self.symbol}")
        self.price_is_run = True
        yahoo_api_base = "query1.finance.yahoo.com"
        query_url = (
            f"http://{yahoo_api_base}/v8/finance/chart/{self.symbol}?symbol={self.symbol}&period1="
            f"{int(start_date_epoch)}&period2={int(end_date_epoch)}&interval=1d&events=div"
        )
        # Renamed from "json" so the stdlib json module is not shadowed.
        reply = requests.get(query_url).json()
        time.sleep(2)  # courtesy pause between API calls
        if reply["chart"]["error"]:
            logger.error(
                f'{self.symbol} error: {reply["chart"]["error"]["description"]}. Skipping financials'
            )
            self.financials_are_run = True
            return
        if "1mo" not in reply["chart"]["result"][0]["meta"]["validRanges"]:
            logger.warning(
                f"1 month is not a valid price sample rate for {self.symbol}. Skipping price."
            )
            return
        if not reply["chart"]["result"][0]["indicators"]["adjclose"][0]:
            logger.warning(
                f"Did not get any price/dividend for {self.symbol}, but not sure why (no error but blank"
                f" query). Skipping financials")
            self.financials_are_run = True
            return
        # Had to tease the values out of a lot of levels of this JSON reply!
        price_list = reply["chart"]["result"][0]["indicators"]["adjclose"][0]["adjclose"]
        self.price = pd.DataFrame(price_list, columns=["Price"])
        # Use the response timestamps as a Pandas datetime index.
        # NOTE(review): dt.fromtimestamp uses the local timezone — confirm
        # that is intended for these market timestamps.
        self.price["Timestamp"] = reply["chart"]["result"][0]["timestamp"]
        self.price["Timestamp"] = self.price["Timestamp"].apply(
            lambda x: dt.fromtimestamp(x))
        self.price.set_index("Timestamp", inplace=True)
        self.price.to_csv(f"ticker_info/prices/{self.symbol}_price.csv")
        # Dividends spreadsheet, teased out of the same JSON response; a
        # missing "dividends" key means the symbol may not pay dividends.
        try:
            dividends_dict = reply["chart"]["result"][0]["events"]["dividends"]
            div_timestamps, div_values = [], []
            for v in dividends_dict.values():
                div_timestamps.append(dt.fromtimestamp(v["date"]))
                div_values.append(v["amount"])
            self.dividends = pd.DataFrame(div_values,
                                          index=div_timestamps,
                                          columns=["Dividend"])
            self.dividends.index.rename("Timestamp", inplace=True)
            self.dividends.sort_index(inplace=True)
            self.dividends.to_csv(
                f"ticker_info/dividends/{self.symbol}_dividends.csv")
        except KeyError:
            logger.warning(
                f"Dividends not found for {self.symbol}. They may not pay out dividends."
            )