Example #1
0
    def connect(self):
        """Open the MySQL connection described by the loaded configuration.

        Reads the ``*_sql`` keys (host, port, user, password, database),
        then stores the connection and a cursor on the instance.
        """
        settings = load_conf()

        self._conn = pymysql.connect(
            host=settings["host_sql"],
            port=settings["port_sql"],
            user=settings["user_sql"],
            password=settings["password_sql"],
            database=settings["database_sql"],
        )
        self._cur = self._conn.cursor()
Example #2
0
def main(args):
    """Collect the table list from MySQL and run zr on the generated YAML."""
    conf = load_conf(args)
    info = MysqlInfo(conf['dsn'])

    include = conf.get('include_table', [])
    exclude = conf.get('exclude_table', [])
    tables = info.get_table_list(include, exclude)

    create_yml(tables)
    run_zr(get_yml_file(), get_conf())
Example #3
0
    def connect(self):
        """Establish the PostgreSQL connection and cursor from the config.

        Reads host/port/user/password/database keys and keeps both the
        connection and a cursor on the instance.
        """
        settings = load_conf()

        self._conn = psycopg2.connect(
            host=settings["host"],
            port=settings["port"],
            user=settings["user"],
            password=settings["password"],
            database=settings["database"],
        )
        self._cur = self._conn.cursor()
Example #4
0
File: anna.py Project: maffe/anna
def main():
    """Start one instance of the Anna bot.

    Parses command-line options, configures logging, loads configuration,
    imports the requested frontends and runs the bot until it stops.
    NOTE: Python 2 module (print statement below).
    """
    global bot
    print __doc__
    modify_syspath()
    # Command-line interface: frontends are positional arguments; the
    # -q/-v/-d flags all write into the single 'loglevel' destination.
    p = optparse.OptionParser(USAGE)
    p.set_defaults(loglevel=logging.WARNING)
    p.add_option("-f", "--file", help="use specified configuration file "
            "instead of default (~/.anna/config)")
    p.add_option("-l", "--list", action="callback", callback=print_frontends,
            help="print a list of available frontends")
    p.add_option("-q", "--quiet", action="store_const", const=logging.ERROR,
            dest="loglevel", help="Only report severe errors.")
    p.add_option("-v", "--verbose", action="store_const", const=logging.INFO,
            dest="loglevel", help="Report informational messages.")
    p.add_option("-d", "--debug", action="store_const", const=logging.DEBUG,
            dest="loglevel", help="Report debugging info.")
    (options, args) = p.parse_args()
    if len(args) == 0:
        p.error("You need to specify at least one frontend. See --list.")
    logging.basicConfig(level=options.loglevel, format=LOGFORMAT,
            datefmt=LOGDATEFMT)
    # Temporary: filter out all non-anna debug messages.
    logging.root.handlers[0].addFilter(logging.Filter('anna'))
    config.load_conf(options.file)
    c.start()
    _import_frontends(args)
    # SIGINT triggers the module-level stop() handler for a clean shutdown.
    import signal
    signal.signal(signal.SIGINT, stop)
    bot = Anna(args)
    bot.start()
    # time.sleep() is interrupted by signals, unlike threading.Thread.join().
    while bot.is_running():
        time.sleep(3)
    c.stop()
    _logger.info("Bot stopped.")
    logging.shutdown()
Example #5
0
def main():
    global API

    gc.enable()

    if config.CONF == None:
        subprocess.call(["onedrive-prefs"])
        config.load_conf()

    try:
        if config.CONF == None:
            raise ValueError()
        API = api_v5.PersistentOneDriveAPI.from_conf("~/.lcrc")
        quota = API.get_quota()
        config.QUOTA["free"] = quota[0]
        config.QUOTA["total"] = quota[1]
        config.AUTHENTICATED = True
    except (ValueError, IOError, api_v5.AuthenticationError) as e:
        ret = subprocess.call(["onedrive-prefs"])
        config.load_conf()
        try:
            API = api_v5.PersistentOneDriveAPI.from_conf("~/.lcrc")
            quota = API.get_quota()
            config.QUOTA["free"] = quota[0]
            config.QUOTA["total"] = quota[1]
            config.AUTHENTICATED = True
        except:
            print "OneDrive-d cannot get information from the server. Exit."
            sys.exit(1)

    gc.collect()

    if not config.AUTHENTICATED or config.CONF == None:
        print "OneDrive-d was not authenticated or properly configured. Exit."
        sys.exit(1)

    OneDrive_StatusIcon(API).run()
Example #6
0
def main():
	global API	
	
	gc.enable()
	
	if config.CONF == None:
		subprocess.call(["onedrive-prefs"])
		config.load_conf()
	
	try:
		if config.CONF == None:
			raise ValueError()
		API = api_v5.PersistentOneDriveAPI.from_conf("~/.lcrc")
		quota = API.get_quota()
		config.QUOTA["free"] = quota[0]
		config.QUOTA["total"] = quota[1]
		config.AUTHENTICATED = True
	except (ValueError, IOError, api_v5.AuthenticationError) as e:
		ret = subprocess.call(["onedrive-prefs"])
		config.load_conf()
		try:
			API = api_v5.PersistentOneDriveAPI.from_conf("~/.lcrc")
			quota = API.get_quota()
			config.QUOTA["free"] = quota[0]
			config.QUOTA["total"] = quota[1]
			config.AUTHENTICATED = True
		except:
			print "OneDrive-d cannot get information from the server. Exit."
			sys.exit(1)
	
	gc.collect()
	
	if not config.AUTHENTICATED or config.CONF == None:
		print "OneDrive-d was not authenticated or properly configured. Exit."
		sys.exit(1)
	
	OneDrive_StatusIcon(API).run()
def main():
    """Build the Sanic app from configuration and run it.

    Uses the WebSocket protocol when the app config enables it;
    otherwise runs with the default HTTP protocol.
    """
    conf = load_conf(args)  # NOTE(review): `args` must exist at module level
    app = init_app(conf)

    run_kwargs = {
        "host": app.config.HOST,
        "port": app.config.PORT,
        "worker": app.config.WORKER,
        "debug": False,
        "access_log": False,
    }
    if app.config.WEBSOCKET:
        from sanic.websocket import WebSocketProtocol
        run_kwargs["protocol"] = WebSocketProtocol
    app.run(**run_kwargs)
Example #8
0
    def update(self, names):
        """
        Update the filters whose names are contained in `names`:
        reload the configuration and restart those filters with the
        new settings.

        :param names: A list containing the names of the filters to update.
        :return: An empty list on success. A list of {"filter", "error"}
            dicts on per-filter failures.
        """
        from config import filters as conf_filters
        logger.debug("Update: Trying to open config file")
        try:
            #Reload conf, global variable 'conf_filters' will be updated
            load_conf()
        except ConfParseError:
            error = "Update: wrong configuration format, unable to update"
            logger.error(error)
            # NOTE(review): this path returns a plain string while every
            # other path returns a list -- callers must handle both shapes.
            return error

        logger.info("Update: Configuration loaded")

        with self._lock:
            errors = []
            new = {}
            for n in names:
                try:
                    new[n] = conf_filters[n]
                except KeyError:
                    # The filter is gone from the new configuration:
                    # stop it, clean it up and drop it from our table.
                    try:
                        self.stop_one(self._filters[n], no_lock=True)
                        self.clean_one(self._filters[n], no_lock=True)
                    except KeyError:
                        errors.append({
                            "filter": n,
                            "error": 'Filter not existing'
                        })
                    self._filters.pop(n, None)
                    continue
                # Alternate the '.1'/'.2' suffix so the replacement instance
                # gets fresh pid/socket paths while the old one still runs.
                try:
                    new[n]['extension'] = '.1' if self._filters[n][
                        'extension'] == '.2' else '.2'
                except KeyError:
                    new[n]['extension'] = '.1'

                # Carry the failure counter over from the running instance.
                try:
                    new[n]['failures'] = self._filters[n]['failures']
                except KeyError:
                    new[n]['failures'] = 0

                new[n][
                    'pid_file'] = '/var/run/darwin/{name}{extension}.pid'.format(
                        name=n, extension=new[n]['extension'])

                new[n][
                    'socket'] = '/var/sockets/darwin/{name}{extension}.sock'.format(
                        name=n, extension=new[n]['extension'])

                new[n][
                    'monitoring'] = '/var/sockets/darwin/{name}_mon{extension}.sock'.format(
                        name=n, extension=new[n]['extension'])

            for n, c in new.items():
                cmd = self._build_cmd(c)
                # Launch the replacement process; it is expected to
                # daemonize within one second.
                p = Popen(cmd)
                try:
                    p.wait(timeout=1)
                except TimeoutExpired:
                    if c['log_level'].lower() == "developer":
                        logger.debug(
                            "Debug mode enabled. Ignoring timeout at process startup."
                        )
                    else:
                        logger.error(
                            "Error starting filter. Did not daemonize before timeout. Killing it."
                        )
                        p.kill()
                        p.wait()
                ret = Services._wait_process_ready(c)
                if ret:
                    logger.error("Unable to update filter {}: {}".format(
                        n, ret))
                    # Then there is an error
                    errors.append({"filter": n, "error": ret})
                    try:
                        self.stop(n, c['pid_file'])
                        self.clean_one(c, no_lock=True)
                    except Exception:
                        pass
                    continue

                c['status'] = psutil.STATUS_RUNNING

                logger.info("Switching filters symlink...")
                # Repoint the stable symlink at the new instance's socket so
                # clients connect to the replacement.
                try:
                    if call(['ln', '-sfn', c['socket'], c['socket_link']
                             ]) != 0:
                        raise Exception(
                            'Unable to update filter\'s socket symlink')
                except Exception as e:
                    logger.error("Unable to link new filter {}: {}".format(
                        n, e))
                    errors.append({"filter": n, "error": "{0}".format(e)})
                    try:
                        self.stop(n, c['pid_file'])
                        self.clean_one(c, no_lock=True)
                    except Exception:
                        pass
                    continue

                try:
                    logger.info("Killing older filter...")
                    # Call 'stop' instead of 'stop_one' to avoid deletion of socket_link
                    self.stop(n, self._filters[n]['pid_file'])
                    self.clean_one(self._filters[n], no_lock=True)
                except KeyError:
                    logger.info("no older filter to kill, finalizing...")
                    pass
                self._filters[n] = deepcopy(c)
                logger.info("successfully updated {}".format(n))

        return errors
Example #9
0
if __name__ == '__main__':

    # Clean shutdown on INT/TERM; log rotation on USR1/HUP.
    signal.signal(signal.SIGINT, sig_hdlr)
    signal.signal(signal.SIGTERM, sig_hdlr)
    signal.signal(signal.SIGUSR1, rotate_logs)
    signal.signal(signal.SIGHUP, rotate_logs)

    logger.info("Starting...")
    logger.info("prefix path is '{}'".format(prefix))
    logger.info("suffix path is '{}'".format(suffix))

    server = Server(prefix, suffix)
    logger.info("Configuring...")
    # Parse the filter configuration; a parse error is fatal.
    try:
        load_conf(prefix, suffix, args.config_file)
    except ConfParseError as e:
        logger.critical(e)
        exit(1)

    # Start every configured filter; stop them all again if any fails.
    services = Services(conf_filters)
    try:
        logger.info("Starting services...")
        services.start_all()
    except Exception as e:
        logger.error(
            "Could not start all the services: {0}; exiting".format(e))
        services.stop_all()
        exit(1)

    stopCond = Condition()
Example #10
0
            s3.upload_file('mix.log', bucket_name, 'mix.log')
        return receipt_handle
    # If anything fails, return a null value
    except:
        # Upload the log file
        s3.upload_file('mix.log', bucket_name, 'mix.log')
        return None


#---------------------------------------------------------

if __name__ == '__main__':

    # Load the configuration file, which contains details about
    # the AWS instances to use
    config.load_conf()
    conf = config.conf

    # SQS client scoped to the configured region.
    sqs = boto3.client('sqs', conf['region'])
    queue_url = conf['queue_url']

    # Keep polling the queue for jobs to process
    while 1:
        response = sqs.receive_message(QueueUrl=queue_url)
        # If there is a message in the queue, process the object described
        # in the message
        if 'Messages' in response:
            # Attempt to get the input file, generate the mix, and
            # write the result back to S3
            receipt_handle = process_s3_file(response)
            # Remove the message from the queue if it was successfully processed
Example #11
0
import config

import discord
from discord.ext import commands
from exceptions import SeedBotException
from database import Database
from cogs.weekly import Weekly
from helpers import GameConverter, DatetimeConverter, TimeConverter, get_discord_name

import re
import logging

logger = logging.getLogger(__name__)

# Load the bot configuration once at import time and build the database
# handle from its 'database' section.
cfg = config.load_conf()
db = Database(**cfg['database'])

# Default gateway intents, with the members intent enabled.
intents = discord.Intents.default()
intents.members = True


class SeedbotHelpCommand(commands.DefaultHelpCommand):
    """Default help command with localized (Portuguese) labels, sent via DM."""

    def __init__(self):
        help_attrs = {
            "name": "ajuda",
            "help": "Mostrar esta mensagem",
        }
        super().__init__(
            command_attrs=help_attrs,
            commands_heading="Comandos:",
            no_category="Sem categoria",
            dm_help=True,
        )
Example #12
0
    # Clean shutdown on INT/TERM; log rotation on USR1/HUP.
    signal.signal(signal.SIGINT, sig_hdlr)
    signal.signal(signal.SIGTERM, sig_hdlr)
    signal.signal(signal.SIGUSR1, rotate_logs)
    signal.signal(signal.SIGHUP, rotate_logs)

    logger.info("Starting...")
    # Daemon context with a pid-file lock; detach_process is disabled,
    # so the manager keeps running in the foreground.
    daemon_context = DaemonContext(
        pidfile=FileLock('/var/run/darwin/manager.pid'), )
    daemon_context.detach_process = False
    logger.debug("daemon DONE")

    server = Server()
    logger.info("Configuring...")
    # Parse the filter configuration; a parse error is fatal.
    try:
        load_conf(args.config_file)
    except ConfParseError as e:
        logger.critical(e)
        exit(1)

    # Start every configured filter; stop them all again if any fails.
    services = Services(conf_filters)
    try:
        logger.info("Starting services...")
        services.start_all()
    except Exception as e:
        logger.error(
            "Could not start all the services: {0}; exiting".format(e))
        services.stop_all()
        exit(1)

    stopCond = Condition()