def test_rounds(self):
    with settings(BCRYPT_ROUNDS=0):
        self.assertEqual(get_rounds(), 0)
    with settings(BCRYPT_ROUNDS=5):
        self.assertEqual(get_rounds(), 5)
    with settings(BCRYPT_ROUNDS=NotImplemented):
        self.assertEqual(get_rounds(), 12)

def check_requirements():
    """ Checks requirements no remote host """
    missing = []
    print(green('Checking requirements'))
    for tool in ['compass', 'cake', 'virtualenv', 'git', 'exiftool']:
        with settings(hide('warnings', 'stdout'), warn_only=True):
            result = run('which %s' % tool)
            if result.failed:
                missing.append(tool)
    if missing:
        abort(red('Please install missing packages: %s' % ', '.join(missing)))

    missing = []
    print(green('Checking node requirements'))
    for package in ['stitch', 'uglify-js', 'coffee-script']:
        with settings(hide('warnings', 'stdout'), warn_only=True):
            result = run('npm ls | grep "%s"' % package)
            if result.failed:
                missing.append(package)
    if missing:
        abort(red('Please install missing node packages: %s' % ', '.join(missing)))

def test_enabled_under_test(self):
    with settings(BCRYPT_ENABLED_UNDER_TEST=True):
        self.assertTrue(is_enabled())
    with settings(BCRYPT_ENABLED_UNDER_TEST=False):
        self.assertFalse(is_enabled())
    with settings(BCRYPT_ENABLED_UNDER_TEST=NotImplemented):
        self.assertFalse(is_enabled())

def test_enabled(self):
    with settings(BCRYPT_ENABLED=False):
        self.assertFalse(is_enabled())
    with settings(BCRYPT_ENABLED=True):
        self.assertTrue(is_enabled())
    with settings(BCRYPT_ENABLED=NotImplemented):
        self.assertTrue(is_enabled())

def test_migrate_to_bcrypt(self):
    with settings(BCRYPT_MIGRATE=False):
        self.assertEqual(migrate_to_bcrypt(), False)
    with settings(BCRYPT_MIGRATE=True):
        self.assertEqual(migrate_to_bcrypt(), True)
    with settings(BCRYPT_MIGRATE=NotImplemented):
        self.assertEqual(migrate_to_bcrypt(), False)

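# The settings(...) helper used with BCRYPT_* keyword arguments in the tests above is not
# shown in this section. Below is a minimal sketch, assuming it is a test-only context
# manager that temporarily patches django.conf.settings and treats a value of
# NotImplemented as "remove the setting so the code under test falls back to its default".
from contextlib import contextmanager

from django.conf import settings as django_settings


@contextmanager
def settings(**overrides):
    NOT_SET = object()
    originals = {}
    try:
        for name, value in overrides.items():
            # remember the previous value (or its absence) so it can be restored
            originals[name] = getattr(django_settings, name, NOT_SET)
            if value is NotImplemented:
                if hasattr(django_settings, name):
                    delattr(django_settings, name)
            else:
                setattr(django_settings, name, value)
        yield
    finally:
        for name, original in originals.items():
            if original is NOT_SET:
                if hasattr(django_settings, name):
                    delattr(django_settings, name)
            else:
                setattr(django_settings, name, original)
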
def update_remote_db(self):
    with settings(warn_only=True):
        result = run(self.env['workon_remote'] + 'python %s/d/hot/manage.py syncdb' % self.env['rpath'])
    if result.failed and not confirm('Syncdb failed! Continue?'):
        abort('Aborting')
    with settings(warn_only=True):
        result = run(self.env['workon_remote'] + 'python %s/d/hot/manage.py migrate' % self.env['rpath'])
    if result.failed and not confirm('Migrate failed! Continue?'):
        abort('Aborting')

def __run():
    for app in ["djcelery"]:
        with settings(warn_only=True):
            print "Migrating %s ..." % app
            local("python manage.py migrate %s --settings=settings.dev" % (app))
    for app in enumerate_apps():
        with settings(warn_only=True):
            local_migrate(app)
    start_foreman()

def install_site():
    "Add the virtualhost config file to the webserver's config, activate logrotate"
    require('release', provided_by=[deploy, setup])
    with cd('%(path)s/releases/%(release)s' % env):
        sudo('cp server-setup/%(webserver)s.conf /etc/%(webserver)s/sites-available/%(prj_name)s' % env, pty=True)
        if env.use_daemontools:
            # activate new service runner
            sudo('cp server-setup/service-run.sh /etc/service/%(prj_name)s/run; chmod a+x /etc/service/%(prj_name)s/run;' % env, pty=True)
        else:
            # delete old service dir
            sudo('echo; if [ -d /etc/service/%(prj_name)s ]; then rm -rf /etc/service/%(prj_name)s; fi' % env, pty=True)
        if env.use_supervisor:
            # activate new supervisor.ini
            sudo('cp server-setup/supervisor.ini /etc/supervisor/%(prj_name)s.ini' % env, pty=True)
            if env.use_celery:
                sudo('cp server-setup/supervisor-celery.ini /etc/supervisor/%(prj_name)s-celery.ini' % env, pty=True)
        else:
            # delete old config file (stop the supervised programs first)
            sudo('echo; if [ -f /etc/supervisor/%(prj_name)s.ini ]; then supervisorctl stop %(prj_name)s:appserver; rm /etc/supervisor/%(prj_name)s.ini; fi' % env, pty=True)
            if env.use_celery:
                sudo('echo; if [ -f /etc/supervisor/%(prj_name)s-celery.ini ]; then supervisorctl stop celery celerybeat; rm /etc/supervisor/%(prj_name)s-celery.ini; fi' % env, pty=True)
        if env.use_celery and env.use_daemontools:
            sudo('cp server-setup/service-run-celeryd.sh /etc/service/%(prj_name)s-celery/run; chmod a+x /etc/service/%(prj_name)s-celery/run;' % env, pty=True)
        # try logrotate
        with settings(warn_only=True):
            sudo('cp server-setup/logrotate.conf /etc/logrotate.d/website-%(prj_name)s' % env, pty=True)
        with settings(warn_only=True):
            sudo('cd /etc/%(webserver)s/sites-enabled/; ln -s ../sites-available/%(prj_name)s %(prj_name)s' % env, pty=True)

def setup_shell():
    with cd('$HOME'):
        with settings(warn_only=True):
            run('curl -L http://install.ohmyz.sh | sh')
        sudo('chsh $USER -s $(which zsh);')
        run('usermod -s $(which zsh) $USER;')
        run('curl https://gist.githubusercontent.com/tijptjik/97e1e0380a21249b49d9/raw/9071ee07f29cad69cad70d82d3f1f55033080561/prose.zsh-theme >> .oh-my-zsh/themes/prose.zsh-theme')
        run('mkdir -p $HOME/.tools')
        with settings(warn_only=True):
            run('git clone https://github.com/rupa/z.git $HOME/.tools/z')
        run('curl https://gist.githubusercontent.com/tijptjik/ac9555e37364287aac37/raw/ecd9fec1fb1e5e4de1181e31e852ddb7205c640b/.zshrc > .zshrc')
        run('source $HOME/.zshrc')

def test_change_rounds(self):
    user = User()
    # Hash with 5 rounds
    with settings(BCRYPT_ROUNDS=5):
        bcrypt_set_password(user, 'password')
    password_5 = user.password
    self.assertTrue(bcrypt_check_password(user, 'password'))
    # Hash with 12 rounds
    with settings(BCRYPT_ROUNDS=12):
        bcrypt_set_password(user, 'password')
    password_12 = user.password
    self.assertTrue(bcrypt_check_password(user, 'password'))

def preference(request):
    allergy = get_object_or_404(Allergy, user=request.user)
    # A form for editing if this is a GET request
    if request.method == 'GET':
        preferenceForm = PreferenceForm(instance=allergy)
        return settings(request, preferenceForm)
    preferenceForm = PreferenceForm(request.POST, instance=allergy)
    if preferenceForm.is_valid():
        preferenceForm.save()
    return settings(request, preferenceForm)

def test_migrate_bcrypt_to_bcrypt(self):
    user = User(username='******')
    with settings(BCRYPT_MIGRATE=True, BCRYPT_ROUNDS=10, BCRYPT_ENABLED_UNDER_TEST=True):
        user.set_password('password')
    with settings(BCRYPT_MIGRATE=True, BCRYPT_ROUNDS=12, BCRYPT_ENABLED_UNDER_TEST=True):
        user.check_password('password')
        salt_and_hash = user.password[3:]
        self.assertEqual(salt_and_hash.split('$')[2], '12')
        self.assertEqual(User.objects.get(username='******').password, user.password)

def __init__(self, host, params=None, interactive=None, file_mode=None,
             dir_mode=None, uid=None, gid=None, known_host_file=None,
             root_path=None, base_url=None):
    self._host = host or setting('SFTP_STORAGE_HOST')
    self._params = params or setting('SFTP_STORAGE_PARAMS', {})
    self._interactive = setting('SFTP_STORAGE_INTERACTIVE', False) \
        if interactive is None else interactive
    self._file_mode = setting('SFTP_STORAGE_FILE_MODE') \
        if file_mode is None else file_mode
    self._dir_mode = setting('SFTP_STORAGE_DIR_MODE') \
        if dir_mode is None else dir_mode
    self._uid = setting('SFTP_STORAGE_UID') if uid is None else uid
    self._gid = setting('SFTP_STORAGE_GID') if gid is None else gid
    self._known_host_file = setting('SFTP_KNOWN_HOST_FILE') \
        if known_host_file is None else known_host_file
    self._root_path = setting('SFTP_STORAGE_ROOT', '') \
        if root_path is None else root_path
    self._base_url = setting('MEDIA_URL') if base_url is None else base_url

    # for now it's all posix paths. Maybe someday we'll support figuring
    # out if the remote host is windows.
    self._pathmod = posixpath

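# A minimal sketch of the setting() helper that the SFTP storage __init__ above relies
# on, assuming it simply reads a named value from django.conf.settings with an optional
# default; django-storages ships a similar utility in storages.utils.
from django.conf import settings as django_settings


def setting(name, default=None):
    """Return the named Django setting, or `default` when it is not defined."""
    return getattr(django_settings, name, default)
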
def finalize_remote(self):
    with settings(warn_only=True):
        run('rm -rf %s/d/previous' % self.env['rpath'])
        run('mv %s/d/current %s/d/previous' % (self.env['rpath'], self.env['rpath']))
    with settings(warn_only=True):
        run('rm -rf %s/d/current' % self.env['rpath'])
        run('mv %s/d/hot %s/d/current; rm -rf %s/%s; ln -s %s/d/current %s/%s' % (
            self.env['rpath'], self.env['rpath'],
            self.env['rpath'], self.env['rpdir'],
            self.env['rpath'], self.env['rpath'], self.env['rpdir'])
        )
    # symlink uploads, media, static files, etc
    with settings(warn_only=True):
        run('rm -rf %s/public/media' % self.env['rpath'])
        run('ln -s %s/%s/media/ %s/public/media' % (self.env['rpath'], self.env['rpdir'], self.env['rpath']))
        run('ln -s %s/public/uploads/ %s/%s/media/uploads' % (self.env['rpath'], self.env['rpath'], self.env['rpdir']))

def run(self, *args, **kwargs):
    """
    Load instance from CLI kwargs,
    deploy by HEAD for current branch
    """
    with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True):
        # check if remote stamp exists in local repo
        current_instance = utils.commands.read_link(env.current_instance_path)
        remote_stamp = utils.instance.get_instance_stamp(current_instance)

        # first deploy to remote
        if '/' in remote_stamp:
            print(green('\nFirst deploy to remote.'))
        # deployed commit is not in your local repository
        elif remote_stamp and not utils.commands.remote_stamp_in_local_repo(remote_stamp):
            print(red('\nWarning: deployed commit is not in your local repository.'))
        # show changed files with `diff` command
        else:
            Diff().run()

        # ask to deploy
        self.stamp = utils.source.get_head()
        _args = (utils.source.get_branch_name(), self.stamp)
        question = '\nDeploy branch %s at commit %s?' % _args
        if not confirm(yellow(question)):
            abort(red('Aborted deployment. Run `fab -d %s` for options.' % self.name))

    super(Deployment, self).run(*args, **kwargs)

def remote_migrate(app_name):
    if os.path.exists(os.path.join("./apps", app_name, "migrations")):
        with settings(warn_only=True):
            r = local("heroku run python manage.py migrate apps.%s --settings=settings.prod" % (app_name), capture=True)
            if r.find("django.db.utils.DatabaseError") != -1:
                print "Normal migration failed. Running a fake migration..."
                local("heroku run python manage.py migrate apps.%s --settings=settings.prod --fake" % (app_name))

def __deploy():
    print "Deploying your application"
    print "----------------------------"
    print "Migrations..."
    for app in enumerate_apps():
        local_migrate(app)
    if is_git_clean():
        print "Pushing code on Heroku"
        local("git push heroku master")
    else:
        print "Committing migrations..."
        local("git add .")
        local("git commit -a -m '[DHB] data migrations'")
    print "Sync remote database"
    remote_syncdb()
    for app in ["djcelery"]:
        with settings(warn_only=True):
            print "Migrating %s ..." % app
            local("heroku run python manage.py migrate %s --settings=settings.prod" % (app))
    for app in enumerate_apps():
        remote_migrate(app)
    print "Transferring static files to S3"
    collectstatic()

def setup_release_dirs():
    """Prepares one or more servers for deployment"""
    # combine the two context managers with a comma; `cd(...) and settings(...)`
    # would silently drop the cd() context
    with cd(env.path), settings(warn_only=True):
        sudo('virtualenv .')
        sudo("mkdir -p %(domain_path)s/{releases,shared,packages}" % env)
        sudo("mkdir -p %(shared_path)s/{system,logs,index}" % env)
    permissions()

def test_no_migrate_password(self):
    user = User()
    with settings(BCRYPT_MIGRATE=False, BCRYPT_ENABLED_UNDER_TEST=True):
        _set_password(user, 'password')
        self.assertSha1(user.password, 'password')
        self.assertTrue(bcrypt_check_password(user, 'password'))
        self.assertSha1(user.password, 'password')

def process(request):
    request.breadcrumbs([("Snort Adapter - Process", "")])
    is_ssl_enabled = request.is_secure()
    update_user_jobs(request.user)
    feeds = Feed.for_user(request.user)
    if request.method == 'POST':
        if 'get_settings' in request.POST:
            return settings(request, internal_link=True)
        srcfeed = request.POST['selected_feed_id'].strip()
        cfg = query_user_settings(request.user)
        output_fn = adapters.snort_adapter.stix2snort.utils.build_rule_filename(cfg.output_directory, cfg.filename_prefix)
        result = tasks.run_demo.delay(srcfeed, output_fn, is_ssl_enabled, request.user.username)
        jobrec = JobInfo()
        jobrec.created_by = request.user.id
        jobrec.task_id = result.task_id
        jobrec.output_file = output_fn
        jobrec.save()
        # Check for jobs now to handle the case of a recently added job being nicely formatted
        joblist = get_joblist(request)
        return render(request, 'snort_adapter-process.html', {'jobs': joblist, 'feeds': feeds})
    else:
        # check if user has any active jobs
        # TODO: fill out job model fields
        # query list of feeds this user_id can access, and pass to template
        joblist = get_joblist(request)
        return render(request, 'snort_adapter-process.html', {'feeds': feeds, 'jobs': joblist})

def create_user(username, password, admin='no'):
    "Create unix user, supply admin=yes for sudo enabled accounts"
    with settings(user='******', warn_only=True):
        if admin == 'yes':
            # Create the admin group and add it to the sudoers file
            admin_group = 'admin'
            # grep exits non-zero when the group is not in /etc/group yet
            if run('egrep -i "^{group}" /etc/group'.format(group=admin_group)).failed:
                run('addgroup {group}'.format(group=admin_group))
                run('echo "%{group} ALL=(ALL) ALL" >> /etc/sudoers'.format(group=admin_group))
        # Create the new user (default group=username);
        run('adduser {username} --disabled-password --gecos ""'.format(username=username))
        # add to admin group
        if admin == 'yes':
            run('adduser {username} {group}'.format(username=username, group=admin_group))
        # Set the password for the new admin user
        run('echo "{username}:{password}" | chpasswd'.format(username=username, password=password))

def urlconf(patterns):
    """
    A context manager that turns URL patterns into the global URLconf.

    This is useful when you have a local variable of URL patterns that
    you want to resolve against for the duration of a block.

    :param patterns: list of `RegexURLPattern` or `RegexURLResolver`
        (what you get from `patterns`)

    .. note:: Not thread safe
    """
    NOTSET = object()
    global urlpatterns
    # The extravagant effort here to preserve the current value of urlpatterns
    # is done to ensure nesting of `urlconf`.
    try:
        old = urlpatterns
    except NameError:
        old = NOTSET
    urlpatterns = patterns
    try:
        with settings(ROOT_URLCONF=__name__):
            yield
    finally:
        if old is NOTSET:
            del urlpatterns
        else:
            urlpatterns = old

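# A hypothetical usage sketch for the urlconf() helper above. The view and URL pattern
# are illustrative only, and it assumes two things its body implies but this section does
# not show: that urlconf() is decorated with contextlib.contextmanager, and that its
# settings(...) call temporarily overrides ROOT_URLCONF (as django.test.utils.override_settings
# would on the old Django versions that still provide django.conf.urls.patterns).
from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse
from django.http import HttpResponse


def hello(request):
    return HttpResponse('hello')


with urlconf(patterns('', url(r'^hello/$', hello, name='hello'))):
    # Inside the block the temporary patterns act as the global URLconf,
    # so reversing by name resolves against them.
    print(reverse('hello'))  # expected: '/hello/'
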
def restart_server(memcached_ram=64):
    """restart apache and memcached on the remote server"""
    group_writable_permissions()
    fabhelp.progress("restart apache and memcached")
    sudo("/etc/init.d/apache2 restart")

    # get the memcached port
    from django.conf import settings as django_settings
    u = urlparse.urlparse(django_settings.CACHE_BACKEND)
    port = u.port

    # # this does not work for some very strange reason
    # sudo("/etc/init.d/memcached restart")

    # this is basically how the LA Times does it...
    # https://github.com/datadesk/latimes-fabric-functions/blob/master/fabfile.py
    with settings(warn_only=True):
        pid = run("pgrep memcached")
        if not pid.failed:
            sudo("kill %s" % pid)
        sudo("memcached -u www-data -p %s -m %s -d" % (port, memcached_ram))

def put_task():
    # Upload-file task
    run("mkdir -p /data/logs")
    with cd("/data/logs"):
        with settings(warn_only=True):
            # keep going (rather than aborting) if put() fails
            result = put("/data/logs/access.tar.gz", "/data/logs/access.tar.gz")
        if result.failed and not confirm("put file failed, Continue[Y/N]?"):
            # on failure, ask the user whether to continue
            abort("Aborting file put task!")

def deploy():
    """ IN DEVELOPMENT """
    code_dir = '/srv/django/myproject'
    with settings(warn_only=True):
        if run("test -d %s" % code_dir).failed:
            run("git clone user@vcshost:/path/to/repo/.git %s" % code_dir)
    with cd(code_dir):
        run("git pull")
        run("touch app.wsgi")

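# The `test -d` check in deploy() above can also be written with Fabric 1.x's
# fabric.contrib.files.exists() helper. This is only an equivalent sketch of the same
# clone-or-pull pattern, reusing the placeholder repository URL and path from deploy().
from fabric.api import cd, run
from fabric.contrib.files import exists


def deploy_with_exists():
    code_dir = '/srv/django/myproject'
    if not exists(code_dir):
        # first deploy: clone the repository into place
        run("git clone user@vcshost:/path/to/repo/.git %s" % code_dir)
    with cd(code_dir):
        # subsequent deploys: update the checkout and touch the WSGI file to reload
        run("git pull")
        run("touch app.wsgi")
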
def test_migrate_sha1_to_bcrypt(self):
    user = User(username='******')
    with settings(BCRYPT_MIGRATE=True, BCRYPT_ENABLED_UNDER_TEST=True):
        _set_password(user, 'password')
        self.assertSha1(user.password, 'password')
        self.assertTrue(bcrypt_check_password(user, 'password'))
        self.assertBcrypt(user.password, 'password')
        self.assertEqual(User.objects.get(username='******').password, user.password)

def destroy_database(func=run):
    """
    Destroys the user and database for this project.

    Will not cause the fab to fail if they do not exist.
    """
    with settings(warn_only=True):
        func('dropdb %(project_name)s' % env)
        func('dropuser %(project_name)s' % env)

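# destroy_database() above takes its command runner as a parameter, so the same task can
# target either the remote host or the local machine. A small usage sketch; the wrapper
# names below are illustrative only.
from fabric.api import local


def destroy_remote_database():
    destroy_database()            # default func=run executes dropdb/dropuser remotely


def destroy_local_database():
    destroy_database(func=local)  # reuse the same commands on the local machine
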
def test_no_bcrypt_to_bcrypt(self):
    user = User(username='******')
    with settings(BCRYPT_MIGRATE=True, BCRYPT_ROUNDS=10, BCRYPT_ENABLED_UNDER_TEST=True):
        user.set_password('password')
        old_password = user.password
        user.check_password('password')
        self.assertEqual(old_password, user.password)

def obj_get(self, request, **kw):
    if kw['pk'] != 'site':
        raise ImmediateHttpResponse(response=http.HttpNotFound())
    return GenericObject({
        # This is the git commit on IT servers.
        'version': getattr(settings, 'BUILD_ID_JS', ''),
        'flags': waffles(),
        'settings': settings(),
    })

def check_task():
    # Verify-file task: compare local and remote md5 checksums
    with settings(warn_only=True):
        # local() only captures output when capture=True is set
        lmd5 = local("md5sum /data/logs/access.tar.gz", capture=True).split(' ')[0]
        rmd5 = run("md5sum /data/logs/access.tar.gz").split(' ')[0]
        # compare the local and remote md5 digests
        if lmd5 == rmd5:
            print "OK"
        else:
            print "ERROR"

def local_migrate(app_name):
    # TODO: figure out if there are actual models within the app
    if not os.path.exists(os.path.join("./apps", app_name, "models.py")):
        return
    if not os.path.exists(os.path.join("./apps", app_name, "migrations")):
        with settings(warn_only=True):
            r = local("python manage.py convert_to_south apps.%s --settings=settings.dev" % app_name, capture=True)
            if r.return_code != 0:
                return
    else:
        # app has been converted and ready to roll
        with settings(warn_only=True):
            r = local("python manage.py schemamigration apps.%s --auto --settings=settings.dev" % app_name)
            if r.return_code != 0:
                print "Schema migration return code != 0 -> nothing to migrate"
            else:
                local("python manage.py migrate apps.%s --settings=settings.dev" % (app_name))

def redis_stop(context):
    """Stop the running local dpcreator-redis Docker container"""
    redis_cmd = 'docker stop dpcreator-redis'
    fab_local(redis_cmd)
    return

    # Unreachable fallback retained from an earlier version of this task
    #redis_cmd = 'pkill -f redis'
    redis_cmd = 'docker stop dpcreator-redis'
    with settings(warn_only=True):
        result = fab_local(redis_cmd, capture=True)
        if result.failed:
            print('Nothing to stop')

def read_remote_env():
    from fabric.api import settings
    if env.hosts:
        ret = {}
        with hide('output', 'running', 'warnings'), settings(warn_only=True):
            environment = run("env")
        for line in environment.split('\r\n'):
            key, val = line.split('=', 1)
            ret[key] = val
    else:
        ret = os.environ
    return ret

def setup_project():
    "Does lots of the one-time stuff to make a project ready to deploy"
    #install_packages()
    with settings(warn_only=True):
        sudo("mkdir /var/log/gunicorn")
        sudo("chgrp website /var/log/gunicorn")
        sudo("chmod 775 /var/log/gunicorn")
    with settings(warn_only=True):
        run("mkdir %(project)s" % env)
        #run("mkdir %(virtualenv)s" % env)
        run("mkdir %(database)s" % env)
        run("mkdir %(logs)s" % env)
        run("mkdir %(media)s" % env)
        run("mkdir %(static)s" % env)
        #run("virtualenv --no-site-packages %(virtualenv)s" % env)
        run("git clone git://%(git_server)s/chriscauley/%(project)s.git %(source_dir)s" % env)
    update_environment()

def shiva_the_destroyer():
    """
    Remove all directories, databases, etc. associated with the application.
    """
    with settings(warn_only=True):
        run('rm -Rf %(path)s' % env)
        run('rm -Rf %(log_path)s' % env)
        pgpool_down()
        run('dropdb %(project_name)s' % env)
        run('dropuser %(project_name)s' % env)
        pgpool_up()
        sudo('rm %(apache_config_path)s' % env)
        reboot()
        run('s3cmd del --recursive s3://%(s3_bucket)s/%(project_name)s' % env)

def add_dislike(request):
    dislikeForm = DislikeIngredientForm(request.POST)
    if dislikeForm.is_valid():
        profile = get_object_or_404(Profile, user=request.user)
        text = dislikeForm.cleaned_data['ingredient']
        if not profile.preference.all().filter(name__exact=text):
            ingredient = Ingredient(name=text)
            ingredient.save()
            profile.preference.add(ingredient)
            profile.save()
    allergy = get_object_or_404(Allergy, user=request.user)
    preferenceForm = PreferenceForm(instance=allergy)
    return settings(request, preferenceForm)

def deploy():
    """
    Deploy the latest version of the site to the server and restart Apache2.

    Does not perform the functions of load_new_data().
    """
    require('settings', provided_by=[production, staging])
    require('branch', provided_by=[stable, master, branch])

    with settings(warn_only=True):
        maintenance_up()

    checkout_latest()
    gzip_assets()
    deploy_to_s3()
    maintenance_down()

class GoogleCloudMediaFileStorage(GoogleCloudStorage):
    """
    Google file storage class which gives a media file path for MEDIA_URL,
    not the Google-generated one.
    """

    bucket_name = setting("GS_BUCKET_NAME")

    def __init__(self, *args, **kwargs):
        if not settings.MEDIA_URL:
            raise Exception('MEDIA_URL has not been configured')
        kwargs['bucket_name'] = setting('GS_BUCKET_NAME', strict=True)
        super(GoogleCloudMediaFileStorage, self).__init__(*args, **kwargs)

    def url(self, name):
        """
        Gives the correct MEDIA_URL and not the Google-generated url.
        """
        return urljoin(settings.MEDIA_URL, name)

def restart_webserver():
    "Restart the web server"
    require('webserver')
    env.webport = '8' + run('id -u', pty=True)[1:]
    with settings(warn_only=True):
        if env.webserver == 'nginx':
            require('path')
            if env.use_daemontools:
                # kill process, daemontools will start it again, see service-run.sh
                sudo('kill `cat %(path)s/logs/django.pid`' % env, pty=True)
            if env.use_supervisor:
                if env.use_celery:
                    sudo('supervisorctl restart %(prj_name)s:appserver celery celerybeat' % env, pty=True)
                else:
                    sudo('supervisorctl restart %(prj_name)s:appserver' % env, pty=True)
            #require('prj_name')
            #run('cd %(path)s; bin/python releases/current/manage.py runfcgi method=threaded maxchildren=6 maxspare=4 minspare=2 host=127.0.0.1 port=%(webport)s pidfile=./logs/django.pid' % env)
    sudo('/etc/init.d/%(webserver)s reload' % env, pty=True)