def __init__(self, config):
    # loaded from config
    self.token = load(config, 'BOT_TOKEN')
    self.channel_id = load(config, 'CHANNEL_ID')
    self.google_trending_search_image = load(config, 'GOOGLE_TRENDING_IMAGE')
    # telegram api
    self.updater = Updater(self.token, use_context=True)
    # Get the dispatcher to register handlers
    self.dp = self.updater.dispatcher
    # bot logger
    self.logger = logging.getLogger('log')
    # variables used by bot
    self.id = 0
def test_change_default_value(monkeypatch):
    # language=rst
    """
    An environment variable overrides the schema default for the log level.
    """
    monkeypatch.setenv('LOGLEVEL', 'WARNING')
    monkeypatch.setenv('DB_PASS', 'secret123')
    config = config_loader.load(CONFIG_PATH, SCHEMA_PATH)
    assert config['logging']['loggers']['authz_admin']['level'] == 'WARNING'
def test_missing_required_value(monkeypatch):
    # language=rst
    """
    A missing required environment variable raises ConfigError.
    """
    monkeypatch.delenv('LOGLEVEL', raising=False)
    monkeypatch.delenv('DB_PASS', raising=False)
    with pytest.raises(config_loader.ConfigError):
        config_loader.load(CONFIG_PATH, SCHEMA_PATH)
def test_default_value(monkeypatch):
    # language=rst
    """
    Schema defaults apply when the environment variable is unset.
    """
    monkeypatch.delenv('LOGLEVEL', raising=False)
    monkeypatch.setenv('DB_PASS', 'secret123')
    config = config_loader.load(CONFIG_PATH, SCHEMA_PATH)
    assert config['logging']['loggers']['authz_admin']['level'] == 'DEBUG'
    assert config['postgres']['password'] == 'secret123'
def best_k(df, config_file, k_max=15):
    """
    Models:
        '1': 'KMeans',
        '2': 'KMeans++',
        '3': 'KMedoids',
        '4': 'FuzzyCMeans',
        '5': 'AggloSingle',
        '6': 'AggloAverage',
        '7': 'AggloComplete'
    """
    import matplotlib.pyplot as plt
    from sklearn.metrics import silhouette_score
    from config_loader import load
    from MyKmeans import MyKmeans

    config = load(config_file)
    tol = float(config.get('clustering', 'tol'))
    max_rep = int(config.get('clustering', 'max_rep'))
    kmeans_init_type = config.get('clustering', 'kmeans_init_type')

    # k = 1 has no silhouette score; seed the curve with 0 so the plot starts at k = 1
    x = [1]
    sil = [0]
    for k in range(2, k_max + 1):
        clf = MyKmeans(k, tol, max_rep)
        clf.fit(df)
        pred = clf.labels_
        x += [k]
        sil += [silhouette_score(df, pred, metric='euclidean')]

    plt.figure()
    plt.plot(x, sil, color='green', marker='o')
    plt.title('Silhouette Score ' + str(clf.name))
    plt.xlabel('Number of Clusters')
    plt.ylabel('Average Silhouette Score')
    plt.ylim((0, 1))
    plt.xlim((1, k_max + 1))
    return plt

##
#best_k(2)
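# Usage sketch for best_k() above -- an assumption, not from the original
# source. 'features.csv' and 'clustering.cfg' are hypothetical names; the
# config file must provide a [clustering] section with 'tol', 'max_rep'
# and 'kmeans_init_type'.
#
#   import pandas as pd
#   df = pd.read_csv('features.csv')
#   plot = best_k(df, 'clustering.cfg', k_max=10)
#   plot.savefig('silhouette_scores.png')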
def main():
    if len(sys.argv) == 2:
        config = config_loader.load(sys.argv[1])
        print("Configuration loaded for " + config["jobName"] + " job.\n")
        pin_handler.setUpAllPins(config["pins"])
        print("\nURLs to test for: ")
        for url in config["urlPrefix"]:
            print(" " + url)
        print("\nPolling at frequency of " + str(config["frequency"]) + " seconds.")
        print("\nStarting job...")
        s = sched.scheduler(time.time, time.sleep)
        # Do a poll and report now, and repeat on given frequency
        pollAndReportOnUrls(None, config)
        s.enter(config["frequency"], 1, pollAndReportOnUrls, (s, config))
        s.run()
    else:
        print("Please provide configuration file path as parameter.")
        print("Exiting...")
def load():
    # language=rst
    """
    Load and validate the configuration.

    :rtype: types.MappingProxyType

    .. todo:: Log the chosen path with proper log level.
    """
    config_path = _config_path()
    config = config_loader.load(config_path, CONFIG_SCHEMA_V1_PATH)
    logging.config.dictConfig(config['logging'])
    _logger.info("Loaded configuration from '%s'", os.path.abspath(config_path))
    # logging.config.dictConfig() (called above) requires a MutableMapping
    # as its input, so we only freeze config *after* that call:
    config = frozen(config)
    _validate_scopes(config)
    _validate_profiles(config)
    _validate_roles(config)
    return config
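# frozen() is referenced above but not defined in this snippet. A minimal
# sketch of what such a helper could look like -- an assumption, not the
# project's actual implementation -- recursively wrapping dicts in
# read-only types.MappingProxyType views and lists in tuples:
import types

def frozen_sketch(obj):
    """Return a recursively read-only view of a JSON-like structure."""
    if isinstance(obj, dict):
        return types.MappingProxyType({k: frozen_sketch(v) for k, v in obj.items()})
    if isinstance(obj, list):
        return tuple(frozen_sketch(v) for v in obj)
    return obj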
def bot():
    global log, log_long, stream, chrome, token
    # config.txt lives in the current working directory; os.path.join keeps
    # the path portable instead of hard-coding a Windows separator
    chrome = initialize(
        load(os.path.join(os.getcwd(), 'config.txt')),
        stream, token, log, log_long
    )
    loadTwitch(chrome)
    collectAndBet(chrome, 'https://www.twitch.tv/' + stream)
import os
import sys
import socket
import cStringIO  # Python 2 module; this server predates Python 3
import threading

# read command line and load configuration
import config_loader

server_config = config_loader.load()
server_config["server_type"] = "Monoprocess"
server_config["rlock"] = threading.RLock()
#config_loader.init_config("Async", threading.RLock())

import k_config
k_config.init(server_config)

import HTTP
import k_version

DEBUG = False  # True


class handler(HTTP.HTTP):
    """For some reason persistent connections don't work with this
    monoprocess server. SimpleHTTPServer in the Python standard
    distribution doesn't support them either."""

    def run(self):
        self.handle_request()
        if not self.wfile.closed:
    #labels = df_features['Survived']
    #df_features = df_features.drop(['Survived'], axis=1)
    return df_features


##
if __name__ == '__main__':
    ##
    # Loads config
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", "--config",
        default="titanic.cfg",
        help="specify the location of the clustering config file"
    )
    args, _ = parser.parse_known_args()
    config_file = args.config
    config = load(config_file)

    ##
    verbose = config.get('titanic', 'verbose')
    path = config.get('titanic', 'path') + '/'
    file_type = config.get('titanic', 'file_type')

    filename_type = 'train'
    if file_type == 'all':
        filename_type = 'other'
    print('Filename type:', filename_type)
    print()

    ## train
    trainData = getData(path, filename_type)
    # Preprocessing
import os
import sys
import shutil
from configparser import ConfigParser

from molecule import Molecule
import qcalc
import gcn_runner
import config_loader
import output_writer

config = config_loader.load()
filenames, log_name = config_loader.process_files(config)
qcalc.init(config, log_name)

with open(config['files']['input_geometry'], 'r') as input_file:
    molecule = Molecule(input_file.read())

# Step 1
if config['config_generator'].getboolean('optimize'):
    molecule, energy = qcalc.optimize(molecule, filenames, config)
    output_writer.write_optimized_geo(molecule, energy, filenames['optimized_geometry'])
else:
def main():
    if len(sys.argv) == 2:
        config = config_loader.load(sys.argv[1])
        print("Configuration loaded for " + config["jobName"] + " job.\n")
        pin_handler.setUpAllPins(config["pins"])
        print("\nURLs to test for: ")
        for url in config["urlPrefix"]:
            print(" " + url)
        print("\nPolling at frequency of " + str(config["frequency"]) + " seconds.")
        print("\nStarting job...")
        s = sched.scheduler(time.time, time.sleep)
        # Do a poll and report now, and repeat on given frequency
        pollAndReportOnUrls(None, config)
        s.enter(config["frequency"], 1, pollAndReportOnUrls, (s, config))
        s.run()
    else:
        print("Please provide configuration file path as parameter.")
        print("Exiting...")


try:
    main()
except Exception:
    # On any failure, reload the config and leave the LEDs in a known (all-off) state
    config = config_loader.load(sys.argv[1])
    print("Some sort of exception occurred. Turning off all LEDs...")
    pin_handler.setLEDs(config["pins"], {"success": 0, "failure": 0, "building": 0})
    TOKEN = settings['token']
    STREAM = settings['stream']
    DEBUG = settings['debug']

    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--mute-audio')
    if not DEBUG:
        # run headless with the usual container-friendly flags
        chrome_options.add_argument('headless')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--disable-accelerated-2d-canvas')
        chrome_options.add_argument('--no-first-run')
        chrome_options.add_argument('--no-zygote')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-setuid-sandbox')
    chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])

    driver = webdriver.Chrome(settings['chrome_path'], options=chrome_options)
    driver.set_window_size(1366, 768)
    driver.implicitly_wait(2)
    return driver


if __name__ == "__main__":
    chrome = __init(load(os.path.join(os.getcwd(), 'config.txt')))
    loadTwitch(chrome)
    collectAndBet(chrome, TWITCH_URL + STREAM)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGSEGV, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
def get_config():
    '''
    Returns RawConfigParser instance (config is loaded only once).
    '''
    if not config_loader.config_instance:
        config_loader.load()
    return config_loader.config_instance
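# get_config() above is a lazy singleton: the first call loads the config,
# later calls reuse the cached RawConfigParser. Usage sketch -- the 'app'
# section and 'debug' option are hypothetical names, not from the source:
cfg = get_config()
if cfg.has_option('app', 'debug'):
    print('debug =', cfg.get('app', 'debug'))
assert get_config() is cfg  # every call returns the same instance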
import sys
import os
import time

# Add our script directories to the Python path
sys.path.append(sys.path[0] + os.sep + "core")
sys.path.append(sys.path[0] + os.sep + "scripts")

import irc
import util
import structures
import config_loader

# Load IRC parameters from a file
irc_params = config_loader.load()

# Check our command line arguments for debug level, default to 0 (don't print).
# Setting the default up front avoids a NameError when no argument is given.
debug = 0
if len(sys.argv) > 1 and sys.argv[1].isdigit():
    debug = int(sys.argv[1])
    print("Debug level: " + str(debug))

# initialize IRC client (note: this rebinds the name `irc` from the module
# to the client instance)
irc = irc.IRC(irc_params, debug)

# do some setup with the utility module
irc.util.auto_join(irc_params.channels)
irc.util.set_connect_command(irc_params.connect_command)
        header=True, index=False)


if __name__ == '__main__':
    # Loads config
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", "--config",
        default="random_forest.cfg",
        help="specify the location of the clustering config file")
    args, _ = parser.parse_known_args()
    config_file = args.config
    config = load(config_file, args)

    # Dataset
    config.set('rf', 'dataset', 'lymphography.csv')
    #config.set('rf', 'dataset', 'breastcancer.csv')
    #config.set('rf', 'dataset', 'primarytumor.csv')
    dataset_dir = config.get('rf', 'dataset_dir')
    dataset = config.get('rf', 'dataset')
    path_data = path.join(dataset_dir, dataset)
    try:
        df_dataset = pd.read_csv(path_data, header=0)
    except FileNotFoundError:
        print("Dataset '%s' cannot be found in the path %s" % (dataset, path_data))
import urllib.parse  # importing the submodule explicitly; bare `import urllib` is not enough on Python 3

from config_loader import load
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker

db_config = load()['database']
params = urllib.parse.quote_plus(
    'DRIVER={SQL Server Native Client 11.0};'
    + 'SERVER={0};'.format(db_config['server'])
    + 'DATABASE={0};'.format(db_config['database'])
    + 'UID={0};'.format(db_config['uid'])
    + 'PWD={0}'.format(db_config['password'])
)
conn_string = 'mssql+pyodbc:///?odbc_connect={}'.format(params)

engine = create_engine(conn_string)
db_session = scoped_session(sessionmaker(
    autocommit=False,
    autoflush=False,
    bind=engine
))
Base = declarative_base()
Base.query = db_session.query_property()
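# Usage sketch for the engine/session/Base wiring above. The User model and
# 'users' table are hypothetical, not part of the original source:
from sqlalchemy import Column, Integer, String

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(80))

# Base.query was bound to the scoped session above, so models can be
# queried directly; call db_session.remove() when the unit of work ends.
first_user = User.query.first()
db_session.remove()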
def main():
    if len(sys.argv) == 2:
        config = config_loader.load(sys.argv[1])
        print("Configuration loaded for " + config["jobName"] + " job.\n")
        pin_handler.setUpAllPins(config["pins"])
        print("\nURLs to test for: ")
        for url in config["urlPrefix"]:
            print(" " + url)
        print("\nPolling at frequency of " + str(config["frequency"]) + " seconds.")
        print("\nStarting job...")
        s = sched.scheduler(time.time, time.sleep)
        # Do a poll and report now, and repeat on given frequency
        pollAndReportOnUrls(None, config)
        s.enter(config["frequency"], 1, pollAndReportOnUrls, (s, config))
        s.run()
    else:
        print("Please provide configuration file path as parameter.")
        print("Exiting...")


try:
    main()
except Exception:
    # On any failure, reload the config and leave the LEDs in a known (all-off) state
    config = config_loader.load(sys.argv[1])
    print("Some sort of exception occurred. Turning off all LEDs...")
    pin_handler.setLEDs(config["pins"], {"success": 0, "failure": 0})