Пример #1
0
def local():
    """ Configure fabric for the local development environment. """

    # NOTE: in theory a local environment doesn't need any roledef, because
    # we use Profile.development for running processes and all fabric
    # operations should be run via local(). BUT, in fact, they are run via
    # run() or sudo() (sparks doesn't make any difference, all tasks are
    # "remote"), that's why we populate roledefs, to benefit from all sparks
    # tasks. Every role points at localhost; the comprehension builds one
    # fresh list per role, exactly like the literal form did.
    set_roledefs_and_parallel({
        role: ['localhost']
        for role in ('db', 'web', 'lang', 'beat',
                     'shell', 'flower', 'worker')
    })

    # As of sparks 2.2+, setting env.host_string should no longer be
    # needed, but it's still comfortable to have when launching tasks
    # that are not fully roledefs-compatible (eg. sdf.compilemessages
    # ran alone).
    env.host_string = 'localhost'

    # Source root: the Jenkins workspace on CI, the developer checkout
    # otherwise.
    if USE_JENKINS:
        env.root = JENKINS_ROOT
    else:
        env.root = os.path.expanduser('~/sources/1flow')

    env.env_was_set = True
Пример #2
0
def local():
    """ The local-development task. """

    # NOTE: in theory a local environment doesn't need any roledef, because
    # we use Profile.development for running processes and all fabric
    # operations should be run via local(). BUT, in fact, they are run via
    # run() or sudo() (sparks doesn't make any difference, all tasks are
    # "remote"), that's why we populate roledefs, to benefit from all sparks
    # tasks. One fresh ['localhost'] list per role, as before.
    everywhere = 'localhost'

    set_roledefs_and_parallel({
        role: [everywhere]
        for role in ('db', 'web', 'lang', 'beat',
                     'shell', 'flower', 'worker')
    })

    # As of sparks 2.2+, setting env.host_string should no longer be
    # needed, but it's still comfortable to have when launching tasks
    # that are not fully roledefs-compatible (eg. sdf.compilemessages
    # ran alone).
    env.host_string = everywhere
    env.environment = 'development'

    # Source root: Jenkins workspace on CI, developer checkout otherwise.
    env.root = (JENKINS_ROOT if USE_JENKINS
                else os.path.expanduser('~/sources/1flow'))

    env.env_was_set = True
Пример #3
0
def zero(branch=None):
    """ A master clone, restarted from scratch everytime to test migrations. """

    host = 'zero.1flow.io'

    # All roles live on the single throw-away machine; one fresh list
    # per role, as in the original literal form.
    set_roledefs_and_parallel({
        role: [host]
        for role in ('db', 'web', 'beat', 'shell', 'worker', 'flower')
    })

    # NOTE(review): the *value* of `branch` is never used — any non-None
    # argument switches to the current git branch instead of `branch`
    # itself. Behavior preserved here; confirm this is intended.
    env.branch = 'develop' if branch is None else get_current_git_branch()

    env.host_string = host

    # Keep the workload light: this box only exists to exercise migrations.
    env.sparks_options = {
        'worker_concurrency': {
            '__all__': 2,
        },
    }

    # env.user is set via .ssh/config
    env.env_was_set = True
Пример #4
0
def zero(branch=None):
    """ A master clone, restarted from scratch everytime to test migrations. """

    set_roledefs_and_parallel(dict(
        db=['zero.1flow.io'],
        web=['zero.1flow.io'],
        beat=['zero.1flow.io'],
        shell=['zero.1flow.io'],
        worker=['zero.1flow.io'],
        flower=['zero.1flow.io'],
    ))

    # NOTE(review): the *value* of `branch` is never used — any non-None
    # argument switches to the current git branch instead of `branch`
    # itself. Behavior preserved here; confirm this is intended.
    if branch is None:
        env.branch = 'develop'
    else:
        env.branch = get_current_git_branch()

    env.host_string = 'zero.1flow.io'

    # Throttled concurrency: migrations testing doesn't need more.
    env.sparks_options = {'worker_concurrency': {'__all__': 2}}

    # env.user is set via .ssh/config
    env.env_was_set = True
Пример #5
0
def preview(branch=None):
    """ This is the default config, we don't need to set anything more.

        To create a new test environment:

        adapt .ssh/config
        ssh duncan
        lxc-clone
        in the LXC rootfs, clean /etc/supervisor/conf.d/*
        edit firewall / iptables
        start LXC

    """

    #
    # WARNING: when adding preview machine(s),
    # don't forget to add django settings files for them…
    #
    set_roledefs_and_parallel({
        'db': ['obi.1flow.io'],
        'web': ['obi.1flow.io'],
        'lang': ['obi.1flow.io'],
        'beat': ['obi.1flow.io'],
        'shell': ['worbi.1flow.io'],
        'flower': ['worbi.1flow.io'],
        'worker_high': ['obi.1flow.io'],
        'worker_medium': ['worbi.1flow.io'],
        'worker_low': ['worbi.1flow.io'],
    })

    if branch is None:
        env.branch = get_current_git_branch()
    # implicit: else: branch will be 'develop',
    # set directly from the sparks defaults.

    env.sparks_options = {
        # BUGFIX: 'worker_medium' appeared twice in this dict literal
        # (with the same value); Python silently keeps only the last
        # occurrence, so removing the duplicate is behavior-identical.
        'nice_arguments': {
            'worker_low': '-n 5',
            'worker_medium': '-n 1',
            'worker_high': '-n -3',
        },

        'worker_concurrency': {
            'worker_low': 4,
            'worker_medium': 4,
            'worker_high': 6,
        }
    }

    # we force the user because we can login as standard user there
    env.user        = '******'
    env.env_was_set = True
Пример #6
0
def preview(branch=None):
    """ Default config. We don't need to set anything more.

    To create a new test environment:

        adapt .ssh/config
        ssh duncan
        lxc-clone
        in the LXC rootfs, clean /etc/supervisor/conf.d/*
        edit firewall / iptables
        start LXC

    """

    #
    # WARNING: when adding preview machine(s),
    # don't forget to add django settings files for them…
    #
    obi = 'obi.1flow.io'
    worbi = 'worbi.1flow.io'

    set_roledefs_and_parallel({
        'db': [obi],
        'web': [obi],
        'lang': [obi],
        'beat': [obi],
        'shell': [worbi],
        'flower': [worbi],
        'worker_high': [obi],
        'worker_medium': [worbi],
        'worker_low': [worbi],
    })

    if branch is None:
        env.branch = get_current_git_branch()
    # implicit: else: branch will be 'develop',
    # set directly from the sparks defaults.

    env.sparks_options = {
        'nice_arguments': {
            # Kept commented for reference, as in the previous revision:
            # 'worker_low': '-n 5',
            # 'worker_medium': '-n 1',
            'worker_high': '-n -3',
        },

        'worker_concurrency': {
            'worker_low': 2,
            'worker_medium': 2,
            'worker_high': 4,
        }
    }

    # we force the user because we can login as standard user there
    env.user        = '******'
    env.env_was_set = True
Пример #7
0
def production():
    """ The 1flow.io production environment task.

    Sets the fabric user and environment name, the production roledefs
    (web/db on the main host, celery roles spread over four worker
    machines), and the sparks per-role tuning options.
    """

    # we force the user because we can login as standard user there
    env.user        = '******'
    env.environment = 'production'
    set_roledefs_and_parallel({
        'db': ['1flow.io'],
        'web': ['1flow.io'],
        'beat': ['worker-01.1flow.io', ],
        'shell': ['worker-03.1flow.io', ],
        'flower': ['worker-01.1flow.io', ],

        'worker_high':       ['worker-01.1flow.io',
                              'worker-02.1flow.io',
                              'worker-03.1flow.io',
                              'worker-04.1flow.io', ],

        'worker_medium':     ['worker-02.1flow.io',
                              'worker-03.1flow.io',
                              'worker-04.1flow.io', ],

        'worker_low':        ['worker-03.1flow.io',
                              'worker-04.1flow.io', ],

        'worker_fetch':      ['worker-02.1flow.io',
                              'worker-03.1flow.io',
                              'worker-04.1flow.io', ],

        'worker_swarm':      ['worker-03.1flow.io',
                              'worker-04.1flow.io', ],

        'worker_clean':      ['worker-04.1flow.io', ],

        'worker_background': ['worker-04.1flow.io', ],
    })
    env.sparks_options = {
        # Per-role process niceness: background/fetch work runs at low
        # priority, high-priority queues and the shell run above default.
        'nice_arguments': {
            'worker_low': '-n 3',
            'worker_fetch': '-n 5',
            'worker_background': '-n 10',
            'worker_swarm': '-n 2',
            'worker_medium': '-n 1',
            'worker_high': '-n -3',
            'shell': '-n -1',
        },

        'ionice_arguments': {
            #'shell': '-n -1',
        },

        'repository': {
            # We need to patch this because worker-04 is an LXC on the same
            # physical host than dev.1flow.net, and getting from it directly
            # doesn't work because of my NAT configuration.
            '__all__': '[email protected]:1flow.git',
        },

        # Per-role autoscale bounds; presumably celery's 'max,min'
        # process counts — TODO confirm against the sparks docs.
        'autoscale': {
            'worker_swarm': '32,2',
            'worker_fetch': '24,1',
            'worker_background': '4,0',
            'worker_high': '8,1',

            # Maximum one worker to avoid hammering
            # the database with huge requests.
            'worker_clean': '1,0',

            '__all__': '8,0',
        },

        'max_tasks_per_child': {
            # 2014-03-10: whereas many things have improved with celery 3.1,
            # we still face the problem of slowly leaking workers. Surely it
            # comes from our code, but I didn't find an easy way to find out
            # exactly where. Thus, we relaunch workers every now and then.
            #'worker_swarm': '16',

            # Fetchers can literally eat memory. RECYCLE.
            'worker_fetch': '8',

            # Cleaning tasks are long; worker consumes ~500Mb after first run.
            'worker_clean': '1',

            '__all__': '32',
        },

        # Time-limit is useless because there is already the socket timeout.
        # And anyway, it's leaking memory in celery 3.0.x.
        'worker_soft_time_limit': {
            'worker_swarm': '120',

            # I consider that 5 minutes is enough to convert an article to
            # markdown. If it doesn't acheive the conversion in this time
            # frame, there is probably a more serious problem. Note that it
            # can take time because of high niceness of worker processes,
            # eg. they run at low priority, and a bunch of them on only a
            # few cpu cores. So we have to let them a fair amount of time.
            'worker_fetch': '60',
        },
    }

    env.env_was_set = True
Пример #8
0
def production():
    """ The 1flow.io production configuration task. """

    # we force the user because we can login as standard user there
    env.user        = '******'
    env.environment = 'production'

    # Switch to develop to avoid needing to release a new version
    # for every little patch. 1flow.io is our new testbed. In fact,
    # It's a continuous delivery platform :-D
    env.branch = 'develop'

    env.sparks_options = {
        # 'shell_arguments': {
        #     'command_post_args': "--NotebookApp.ip='*'",
        # },

        # Human-readable description + queue list per worker role.
        'worker_information': {
            'worker_mongo': (
                'MongoDB worker (transient)',
                'high,medium,low',
            ),
            'worker_sync': (
                'Inter-node synchronization worker',
                'sync,background',
            ),
            # NOTE(review): description below duplicates worker_sync's,
            # while the queue is 'permanent' — possibly a copy/paste
            # leftover; confirm before relying on it.
            'worker_permanent': (
                'Inter-node synchronization worker',
                'permanent',
            ),
            'worker_net': (
                'Network-related worker',
                'swarm,refresh',
            ),
            'worker_default': (
                'Default celery queue worker',
                'default,create',
            ),
            'worker_articles': (
                'Articles parsing worker',
                'fetch',
            ),
            'worker_longtasks': (
                'Long tasks worker',
                'check,clean',
            ),
        },

        # Extra celery command-line flags; article workers also get a
        # hard time limit on top of the common flags.
        'custom_arguments': {
            'worker_articles': (
                '--without-heartbeat --without-mingle --without-gossip '
                '--time-limit 210'),
            '__all__': '--without-heartbeat --without-mingle --without-gossip',
        },

        'nice_arguments': {

            # Lower priority
            'worker_articles': '-n 5',
            'worker_mongo': '-n 3',
            'worker_longtasks': '-n 1',

            # Higher priority
            'flower': '-n -1',
            'shell': '-n -5',

            # Others are default.
        },

        'ionice_arguments': {
            'shell': '-c 2 -n 1',
        },

        # 'worker_pool': {
        #     'worker_permanent': 'prefork',
        #     '__all__': 'gevent',
        # },

        # 'repository': {
        #     '__all__': '[email protected]:1flow.git',
        # },

        # Per-role autoscale bounds; presumably celery's 'max,min'
        # process counts — TODO confirm against the sparks docs.
        'autoscale': {                      # queues:
            'worker_mongo':     '144,2',     # 'high,medium,low',
            'worker_net':       '27,9',     # 'swarm,refresh',
            # 'worker_default':   '16,4',     # 'default,create',
            # 'worker_sync':      '16,4',    # 'sync,background',
            'worker_articles':  '12,4',      # 'fetch',
            'worker_longtasks': '2,1',      # 'check,clean',

            '__all__': '9,3',
        },

        'max_tasks_per_child': {
            # Cleaning tasks are long; worker
            # consumes ~500Mb after first run.
            'worker_longtasks': '1',

            # Clean often to avoid eventual memory leaking
            # (instanciating processors external parsers, mainly).
            'worker_articles': '32',

            '__all__': '128',
        },

        'worker_soft_time_limit': {
            'worker_net': '120',

            # I consider that a few minutes is enough to fetch an article
            # and convert it to markdown on a loaded system. Note that it
            # can take time because of low priority of worker processes.
            # So we have to let them a fair amount of time. If it doesn't
            # acheive the conversion in this time frame, there is probably
            # a more serious problem.
            'worker_articles': '180',

            # Force Twitter permanent workers to
            # release locks & free resources.
            'worker_permanent': '300',

            # 7 days: 604800
            # 4 days seems to be a good start.
            # If tasks last more than that, they should
            # probably be split into smaller parts.
            'worker_longtasks': '345600',

            # For general-purpose tasks, 10 minutes can seem very long.
            # On a loaded system, this is reasonable for medium-duration
            # tasks.
            '__all__': '600',
        },
    }

    # Role → host mapping; celery roles are spread over four worker
    # machines, web/db stay on the main host.
    set_roledefs_and_parallel({
        'db': ['1flow.io', ],
        'web': ['1flow.io', ],

        'beat': ['worker-01.1flow.io', ],
        'flower': ['worker-01.1flow.io', ],
        'shell': ['worker-01.1flow.io', ],

        'worker_sync': [
            'worker-01.1flow.io',
        ],
        'worker_default': [
            'worker-02.1flow.io',
        ],
        'worker_net': [
            'worker-02.1flow.io',
        ],
        'worker_articles': [
            # '1flow.io',
            'worker-03.1flow.io',
            # '10.0.3.111',
        ],
        'worker_longtasks': [
            'worker-04.1flow.io',
        ],

        #
        # 2015-01-27: I ran:
        #   fab prod R:worker_mongo stop
        #   fab prod R:worker_mongo remove
        #
        # And commented the following lines.
        #
        # 'worker_mongo': [
        #     'worker-04.1flow.io',
        # ],
    })
    env.env_was_set = True