def run_command(args): cmd = args.cmd if not cmd: print "Please enter command to run. Example: kitrun.py remotecmd \"ls -l\"" return conf = get_config() service_name = conf.deployable['deployable_name'] tier = conf.tier['tier_name'] region = conf.tier['aws']['region'] ssh_key_name = conf.tier['aws']['ssh_key'] ssh_key_file = '~/.ssh/{}.pem'.format(ssh_key_name) print "\n*** EXECUTING REMOTE COMMAND '{}' ON SERVICE '{}' / TIER '{}' IN REGION '{}'\n".format(cmd, service_name, tier, region) instances = get_ec2_instances(region, tier, service_name) for ec2 in instances: ip_address = ec2.private_ip_address print "*** Running '{}' on {}...".format(cmd, ip_address) env.host_string = ip_address env.user = EC2_USERNAME env.key_filename = ssh_key_file run(cmd) print
def drift_app(app):
    """Initialize a Flask app for Drift and return it.

    Sets up the import search path, instance/static folders, loads the drift
    config into the app config, applies patches and installs all modules.
    """
    # Find application root and make it importable; use it for the
    # instance and static paths as well.
    root = _find_app_root()
    sys.path.append(root)
    app.instance_path = root
    app.static_folder = os.path.join(root, 'static')

    # Trigger loading of drift config and merge it into the Flask config.
    conf = get_config()
    app.config.update(conf.drift_app)
    log.info("Configuration source: %s", conf.source)

    _apply_patches(app)

    # Install apps, api's and extensions.
    install_modules(app)

    # TODO: Remove this or find a better place for it
    if app.debug:
        log.info("Flask server is running in DEBUG mode.")
    else:
        log.info("Flask server is running in RELEASE mode.")

    # The debug toolbar is strictly optional; just mention how to get it.
    try:
        from flask_debugtoolbar import DebugToolbarExtension
        DebugToolbarExtension(app)
    except ImportError:
        log.info("Flask DebugToolbar not available: Do 'pip install "
                 "flask-debugtoolbar' to enable.")

    return app
def get_provider_config(provider_name):
    """Return the 'provider_details' entry for `provider_name` on the current
    product, or None if no matching 'platforms' row exists.
    """
    conf = get_config()
    rows = conf.table_store.get_table('platforms').find({
        'product_name': conf.product['product_name'],
        'provider_name': provider_name
    })
    # BUG FIX: the old `len(rows) and rows[0][...] or None` idiom returned
    # None whenever 'provider_details' itself was falsy (e.g. empty dict).
    # An explicit conditional distinguishes "no row" from "falsy details".
    return rows[0]['provider_details'] if rows else None
def run_command(args): conf = get_config() deployable_name = conf.deployable['deployable_name'] tier = conf.tier['tier_name'] region = conf.tier['aws']['region'] ssh_key_name = conf.tier['aws']['ssh_key'] ssh_key_file = '~/.ssh/{}.pem'.format(ssh_key_name) print "\n*** VIEWING LOGS FOR SERVICE '{}' / TIER '{}' IN REGION '{}'\n".format(deployable_name, tier, region) instances = get_ec2_instances(region, tier, deployable_name) if args.host: instances = [i for i in instances if i.private_ip_address == args.host] print "Gathering logs from '%s' on the following instances:" % UWSGI_LOGFILE for inst in instances: print " %s" % inst.private_ip_address if args.stream and len(instances) > 1: print "The --stream argument can only be used on a single host. Please use --host to pick one" return for ec2 in instances: ip_address = ec2.private_ip_address print "*** Logs in {} on {}...".format(UWSGI_LOGFILE, ip_address) if not args.stream: env.host_string = ip_address env.user = EC2_USERNAME env.key_filename = ssh_key_file cmd = "sudo tail {} -n 100".format(UWSGI_LOGFILE) if args.grep: cmd += " | grep {}".format(args.grep) run(cmd) print else: import paramiko import select client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) key_path = os.path.expanduser(ssh_key_file) client.connect(ip_address, username=EC2_USERNAME, key_filename=key_path) channel = client.get_transport().open_session() grep_cmd = "" if args.grep: grep_cmd = " | grep --line-buffered {}".format(args.grep) channel.exec_command("sudo tail -f {}{}".format(UWSGI_LOGFILE, grep_cmd)) while True: if channel.exit_status_ready(): break rl, wl, xl = select.select([channel], [], [], 0.0) if len(rl) > 0: sys.stdout.write(channel.recv(1024))
def xxxxcreate_command(args): tenant_name = args.tenant if not tenant_name: tenants_report() return os.environ['DRIFT_DEFAULT_TENANT'] = tenant_name # Minor hack: from drift.flaskfactory import load_flask_config try: conf = get_drift_config( tier_name=get_tier_name(), tenant_name=tenant_name, drift_app=load_flask_config(), ) except TenantNotConfigured as e: raise except Exception as e: print Fore.RED + "'tenant {}' command failed: {}".format( args.action, e) return if not args.action: tenant_report(conf) return if args.action in ['create', 'recreate']: # Provision resources with TSTransaction() as ts: conf = get_config(ts=ts) resources = conf.drift_app.get("resources") for module_name in resources: m = importlib.import_module(module_name) if hasattr(m, "provision"): provisioner_name = m.__name__.split('.')[-1] print "Provisioning '%s' for tenant '%s' on tier '%s'" % ( provisioner_name, tenant_name, conf.tier['tier_name']) if 0: # THIS IS BONKERS LOGIC! FIIIIX! conf.tier['resource_defaults'].append({ 'resource_name': provisioner_name, 'parameters': getattr(m, 'NEW_TIER_DEFAULTS', {}), }) recreate = 'recreate' if args.action == 'recreate' else 'skip' m.provision(conf, {}, recreate=recreate) row = ts.get_table('tenants').get(conf.tenant) row['state'] = 'active' tenant_report(conf)
def tenants_report(tenant_name=None): conf = get_config() if not tenant_name: print "The following active tenants are registered in config on tier '{}' for deployable '{}:".format( conf.tier['tier_name'], conf.deployable['deployable_name']) criteria = { 'tier_name': conf.tier['tier_name'], 'deployable_name': conf.deployable['deployable_name'], 'state': 'active' } if tenant_name: criteria['tenant_name'] = tenant_name active_tenants = conf.table_store.get_table('tenants').find(criteria) for tenant in active_tenants: if 'postgres' not in tenant: sys.stdout.write( "No postgres resource available for tenant {}.".format( tenant["tenant_name"])) continue postgres_params = process_connection_values(tenant['postgres']) sys.stdout.write( "{} for {} on {}/{}... ".format(tenant["tenant_name"], tenant["deployable_name"], postgres_params['server'], postgres_params['database']), ) db_err = db_check(tenant['postgres']) if db_err: print Fore.RED + "Error: %s" % db_err else: print Fore.GREEN + " OK! Database is online and reachable" print "To view more information about each tenant run this command again with the tenant name"
def run_command(args):
    """Quick-deploy the current project (optionally plus drift itself) to EC2.

    Builds sdist zips, uploads and pip-installs them on every matching
    instance, pushes the bundled config, then runs a quickdeploy shell
    script on each host.  Refuses to run against a mismatching or live tier
    unless explicitly confirmed via --deploy-to-this-tier.
    """
    conf = get_config()
    service_name = conf.deployable['deployable_name']
    tier = conf.tier['tier_name']
    region = conf.tier['aws']['region']
    ssh_key_name = conf.tier['aws']['ssh_key']
    ssh_key_file = '~/.ssh/{}.pem'.format(ssh_key_name)
    include_drift = args.drift

    # Safety rail #1: the tier named on the command line must match config.
    if args.tiername and args.tiername != tier:
        print "Default tier is '{}' but you expected '{}'. Quitting now.".format(
            tier, args.tiername)
        return

    # Safety rail #2: live tiers require the tier name to be stated explicitly.
    if conf.tier['is_live'] and tier != args.tiername:
        print "You are quickdeploying to '{}' which is a protected tier.".format(
            tier)
        print "This is not recommended!"
        print "If you must do this, and you know what you are doing, state the name of"
        print "the tier using the --deploy-to-this-tier argument and run again."
        return

    # Projects to build: the current one, and optionally the drift library
    # located from its installed package path.
    project_folders = ["."]
    if include_drift:
        import drift
        drift_folder = os.path.abspath(os.path.join(drift.__file__, '..', '..'))
        project_folders.append(drift_folder)

    def deploy(distros):
        # Build an sdist zip per project into `distros`, collect one
        # quickdeploy shell script per project, then install everything on
        # each instance.
        shell_scripts = []
        for project_folder in project_folders:
            print "Creating source distribution from ", project_folder
            cmd = [
                "python",
                os.path.join(project_folder, "setup.py"),
                "sdist",
                "--formats=zip",
                "--dist-dir=" + distros,
            ]
            p = subprocess.Popen(cmd, cwd=project_folder,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
            stdout, _ = p.communicate()
            if p.returncode != 0:
                print stdout
                sys.exit(p.returncode)

            # Use custom quickdeploy script if found.
            # Prefix them with environment variables:
            header = "#!/bin/bash\n"
            header += "export DRIFT_SERVICE_NAME={}\n".format(service_name)
            if 'PORT' in conf.drift_app:
                header += "export DRIFT_PORT={}\n".format(
                    conf.drift_app['PORT'])
            header += "export UWSGI_LOGFILE={}\n\n".format(UWSGI_LOGFILE)

            quickdeploy_script_file = os.path.join(project_folder,
                                                   "scripts/quickdeploy.sh")
            if os.path.exists(quickdeploy_script_file):
                print "Using quickdeploy.sh from this project."
            else:
                print "Using standard quickdeploy.sh from Drift library"
                # Use standard quickdeploy script. Only works for web stacks.
                quickdeploy_script_file = pkg_resources.resource_filename(
                    __name__, "quickdeploy.sh")
            with open(quickdeploy_script_file, 'r') as f:
                # Strip the shebang from the body; the header provides it.
                src = header + f.read().replace("#!/bin/bash", "")
                shell_scripts.append(src)

        for ec2 in get_ec2_instances(region, tier, service_name):
            if args.ip and ec2.private_ip_address != args.ip:
                print "Skipping ", ec2.private_ip_address
                continue
            # Point Fabric's global connection state at this instance.
            env.host_string = ec2.private_ip_address
            env.user = EC2_USERNAME
            env.key_filename = ssh_key_file

            for dist_file in os.listdir(distros):
                print "Installing {} on {}".format(dist_file,
                                                   ec2.private_ip_address)
                full_name = os.path.join(distros, dist_file)
                with settings(warn_only=True):
                    # Remove the previous file forcefully, if needed
                    run("sudo rm -f {}".format(dist_file))
                put(full_name)
                cmd = "sudo -H pip install {} --upgrade".format(dist_file)
                if args.skiprequirements:
                    cmd += " --no-deps"
                if args.forcereinstall:
                    cmd += " --force-reinstall"
                run(cmd)

                # Run pip install on the requirements file.
                if not args.skiprequirements:
                    cmd = "unzip -p {} {}/requirements.txt | xargs -n 1 -L 1 sudo pip install".format(
                        dist_file, os.path.splitext(dist_file)[0])
                    run(cmd)

                # Minor hack: only the project's own zip carries the config;
                # extract it straight into /etc/opt/<service>/config/.
                if service_name in dist_file:
                    cmd = "sudo sh -c 'unzip -p {} {}/config/config.json > /etc/opt/{}/config/config.json'".format(
                        dist_file, os.path.splitext(dist_file)[0],
                        service_name)
                    run(cmd)

            print "Running quickdeploy script on {}".format(
                ec2.private_ip_address)
            for shell_script in shell_scripts:
                with settings(warn_only=True):
                    run(shell_script)

    # todo: see if this needs to be done as well:
    ## _set_ec2_tags(ec2, deployment_manifest, "drift:manifest:")

    # Wrap the business logic in RAII block: the temp dist dir is always
    # removed, even if the deploy fails part-way.
    distros = tempfile.mkdtemp(prefix='drift.quickdeploy.')
    try:
        deploy(distros)
    finally:
        shutil.rmtree(distros)
def setup_tenant():
    """
    Called from individual test modules.
    create a tenant only if the test module was not called from the
    kitrun's systest command (in which case drift_test_database has been
    set in environ)
    Also configure some basic parameters in the app
    Returns the config object from get_config()
    """
    # Module-level guard so repeated calls reuse the already-created tenant.
    global _tenant_is_set_up
    if _tenant_is_set_up:
        tenant_name = driftconfig.testhelpers.get_name('tenant')
        conf = get_config(tenant_name=tenant_name)
        return conf
    _tenant_is_set_up = True

    # Always assume local servers
    os.environ['DRIFT_USE_LOCAL_SERVERS'] = '1'

    # TODO: Refactor deployable name logic once it's out of flask config.
    from drift.flaskfactory import load_flask_config
    driftconfig.testhelpers.DEPL_NAME = str(load_flask_config()['name'])

    ts = driftconfig.testhelpers.create_test_domain()
    set_sticky_config(ts)

    # Create a test tenant; the env vars below must be in place before
    # get_config() is called so it resolves the test tier/tenant.
    tier_name = driftconfig.testhelpers.get_name('tier')
    tenant_name = driftconfig.testhelpers.get_name('tenant')
    os.environ['DRIFT_TIER'] = tier_name
    os.environ['DRIFT_DEFAULT_TENANT'] = tenant_name

    conf = get_config(tenant_name=tenant_name)

    # Fixup tier defaults: start with no resource defaults and a dummy
    # service user for the test run.
    conf.tier['resource_defaults'] = []
    conf.tier['service_user'] = {
        "password": "******", "username": "******"
    }

    # Provision resources: each module exposing provision() gets a default
    # entry appended and is then provisioned from scratch ('recreate').
    resources = conf.drift_app.get("resources")
    for module_name in resources:
        m = importlib.import_module(module_name)
        if hasattr(m, "provision"):
            provisioner_name = m.__name__.split('.')[-1]
            log.info("Provisioning '%s' for tenant '%s' on tier '%s'",
                     provisioner_name, tenant_name, tier_name)
            conf.tier['resource_defaults'].append({
                'resource_name': provisioner_name,
                'parameters': getattr(m, 'NEW_TIER_DEFAULTS', {}),
            })
            m.provision(conf, {}, recreate='recreate')

    # mixamix
    from drift.appmodule import app
    app.config['TESTING'] = True

    return conf
def run_command(args): service = args.service conf = get_config() tier_name = conf.tier['tier_name'] region = conf.tier['aws']['region'] ssh_key_name = conf.tier['aws']['ssh_key'] deployables = conf.table_store.get_table('deployables').find( {"tier_name": tier_name}) deployables = {depl["deployable_name"]: depl for depl in deployables} if service is None: print "Select an instance to connect to:" for k in sorted(deployables.keys()): print " ", k return elif service not in deployables: print "Warning! Service or deployable '{}' not one of {}.".format( service, ", ".join(deployables.keys())) ssh_key_file = '~/.ssh/{}.pem'.format(ssh_key_name) ssh_key_file = ssh_key_file.replace('~', os.path.expanduser('~')) # Get IP address of any instance of this deployable. sess = boto3.session.Session(region_name=region) ec2 = sess.client("ec2") filters = [ { "Name": "instance-state-name", "Values": ["running"] }, { "Name": "tag:tier", "Values": [tier_name] }, { "Name": "tag:service-name", "Values": [service] }, ] print "Getting a list of EC2's from AWS matching the following criteria:" for criteria in filters: print " {} = {}".format(criteria["Name"], criteria["Values"][0]) ret = ec2.describe_instances(Filters=filters) instances = [] for res in ret["Reservations"]: instances += res["Instances"] if not instances: print "No instance found which matches the criteria." return print "Instances:" inst = instances[0] for i, ins in enumerate(instances): lb = [ tag["Value"] for tag in ins["Tags"] if tag["Key"] == "launched-by" ] or ["n/a"] print " {}: {} at {} launched by {} on {}".format( i + 1, ins["InstanceId"], ins["PrivateIpAddress"], lb[0], ins["LaunchTime"]) if len(instances) > 1: which = raw_input( "Select an instance to connect to (or press enter for first one): " ) if which: inst = instances[int(which) - 1] else: print "Only one instance available. Connecting to it immediately.." 
ip_address = inst["PrivateIpAddress"] cd_cmd = "" if service in deployables: cd_cmd = 'cd /usr/local/bin/{}; exec bash --login'.format(service) cmd = [ "ssh", "ubuntu@{}".format(ip_address), "-i", ssh_key_file, "-t", cd_cmd ] print "\nSSH command:", " ".join(cmd) p = subprocess.Popen(cmd) stdout, _ = p.communicate() if p.returncode != 0: print stdout sys.exit(p.returncode)
def run_command(args): pick_tests = [] if args.tests: pick_tests = [t.lower() for t in args.tests.split(",")] print "Picking tests {}".format(pick_tests) # Set up a mock tenant so we can bootstrap the app and inspect # the modules within. setup_tenant() conf = get_config() test_modules = [] for app in conf.drift_app['apps']: m = importlib.import_module(app) path = dirname(m.__file__) tests_path = os.path.join(path, "tests") if not os.path.exists(tests_path): print "No tests found for app '{}'".format(app) continue if not os.path.exists(os.path.join(tests_path, "__init__.py")): print "No tests found for app '{}' (missing __init__.py)".format(app) continue n = 0 for filename in os.listdir(tests_path): if filename.endswith(".py") and not filename.startswith("__"): test_module_name = app + ".tests." + filename[:-3] test_modules.append(test_module_name) n += 1 print "app '{}' has {} test modules".format(app, n) suites = {} for module_name in test_modules: # first import it to see if we get any errors m = importlib.import_module(module_name) suites[module_name] = unittest.defaultTestLoader.loadTestsFromName(module_name) tests_to_run = [] tests_to_skip = [] for module_name, suite in suites.iteritems(): for test_cases in suite: for t in test_cases: if pick_tests: for p in pick_tests: if p in str(t).lower(): tests_to_run.append(t) else: tests_to_skip.append(t) else: tests_to_run.append(t) print "Running {} test(s) from {} module(s)".format(len(tests_to_run), len(suites)) print "Skipping {} test(s)".format(len(tests_to_skip)) if pick_tests: print "Just running the following tests:" if not tests_to_run: print " No tests found!" 
for t in tests_to_run: print " {}".format(t) if args.preview: return test_suite = unittest.TestSuite(tests_to_run) verbosity = 1 if args.verbose: verbosity = 2 if not args.logging: logging.disable(logging.WARNING) cls = unittest.TextTestRunner if is_running_under_teamcity and TeamcityTestRunner: if is_running_under_teamcity(): cls = TeamcityTestRunner results = cls(verbosity=verbosity, failfast=args.failfast).run(test_suite) # if a tenant was not specified on the commandline we destroy it if not results.wasSuccessful(): sys.exit(1)
def __call__(self, *args, **kwargs):
    """Run the wrapped task inside a Flask app context.

    Loads the drift config into `g.conf` before delegating to
    TaskBase.__call__, so task bodies can rely on `g.conf` being set.
    """
    with app.app_context():
        g.conf = get_config()
        return TaskBase.__call__(self, *args, **kwargs)
def test_get_static_data(self):
    """Exercise the static_data endpoint: default ref, client pin, CDN list."""
    from driftbase.staticdata.handlers import DATA_URL, INDEX_URL, CDN_LIST
    from drift.appmodule import app
    self.auth()
    endpoint = self.endpoints.get('static_data')
    self.assertIsNotNone(endpoint, "'static_data' endpoint not registered.")

    # Fudge the config a bit: inject a legacy static-data ref for the tenant.
    get_config().tenant["static_data_refs_legacy"] = {
        "repository": "borko-games/the-ossomizer",
        "revision": "refs/heads/developmegood",
    }
    ref1 = {"commit_id": "abcd", "ref": "refs/heads/developmegood"}
    ref2 = {"commit_id": "c0ffee", "ref": "refs/tags/v0.1.4"}

    # Make "S3" respond as such:
    # (responses consumes registrations, so re-register before each request)
    def mock_s3_response():
        responses.add(
            responses.GET,
            '{}borko-games/the-ossomizer/index.json'.format(INDEX_URL),
            body=json.dumps({"index": [ref1, ref2]}),
            status=200,
            content_type='application/json')

    mock_s3_response()
    resp = self.get(endpoint).json()

    # There should be at least one entry in the static_data_urls pointing
    # to developmegood
    urls = resp.get("static_data_urls")
    self.assertIsNotNone(urls, "The 'static_data_urls' key is missing")
    self.assertTrue(
        len(urls) > 0,
        "There should be at least one entry in 'static_data_urls'.")
    self.assertEqual(
        urls[0]["data_root_url"],
        u"{}{}/data/{}/".format(DATA_URL, "borko-games/the-ossomizer",
                                "abcd"))
    self.assertEqual(urls[0]["origin"], "Tenant config")
    self.assertEqual(urls[0]["commit_id"], ref1["commit_id"],
                     "I should have gotten the default ref.")

    # Now we test the pin thing, first without the server set to honor it:
    # the ?static_data_ref query arg should be ignored.
    mock_s3_response()
    resp = self.get(endpoint + "?static_data_ref=refs/tags/v0.1.4").json()

    # There should be at least one entry in the static_data_urls pointing
    # to developmegood
    urls = resp.get("static_data_urls")
    self.assertIsNotNone(urls, "The 'static_data_urls' key is missing")
    self.assertTrue(
        len(urls) > 0,
        "There should be at least one entry in 'static_data_urls'.")
    self.assertEqual(urls[0]["origin"], "Tenant config")
    self.assertEqual(urls[0]["commit_id"], ref1["commit_id"],
                     "I should have gotten the default ref.")

    # Turn on pin feature: now the client's requested ref must win.
    get_config(
    ).tenant["static_data_refs_legacy"]["allow_client_pin"] = True
    mock_s3_response()
    resp = self.get(endpoint + "?static_data_ref=refs/tags/v0.1.4").json()
    urls = resp.get("static_data_urls")
    self.assertEqual(urls[0]["origin"], "Client pin")
    self.assertEqual(urls[0]["commit_id"], ref2["commit_id"],
                     "I should have gotten the pinned ref.")

    # Test cdn list: an extra CDN entry must be reflected in the response
    # and its url tail must match the master data_root_url.
    test_root = 'http://test-cdn.com/the/root'
    CDN_LIST.append(['test-cdn', test_root])
    mock_s3_response()
    resp = self.get(endpoint).json()
    urls = resp.get('static_data_urls')
    cdns = {
        cdn_entry['cdn']: cdn_entry['data_root_url']
        for cdn_entry in urls[0]['cdn_list']
    }
    self.assertIn('test-cdn', cdns)
    self.assertTrue(cdns['test-cdn'].startswith(test_root))
    # Make sure the cdn entry matches the master url
    test_url_tail = cdns['test-cdn'].replace(test_root, '')
    self.assertTrue(urls[0]['data_root_url'].endswith(test_url_tail))