Example #1
def load_bosslets():
    n = lambda c: normalize(c).replace('_', '.')
    bosslets = [n(c) for c in glob(repo_path('config', '*.py'))]
    custom = [n(c) for c in glob(repo_path('config', 'custom', '*.py'))]

    bosslets.extend(custom)
    return bosslets
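
load_bosslets (and load_scenarios / load_cf_configs in Examples #4 and #5 below) lean on two helpers from lib.constants that these snippets never show. A minimal sketch of what they plausibly do; the bodies are illustrative guesses, not the project's actual implementations:

import os

def repo_path(*parts):
    # Guess: resolve a path relative to the repository root
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    return os.path.join(root, *parts)

def normalize(path):
    # Guess: reduce 'config/custom/foo_bar.py' to 'foo_bar'
    return os.path.splitext(os.path.basename(path))[0]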
Example #2
def load_lambdas_on_s3(session, domain, bucket):
    """Zip up spdb, bossutils, lambda and lambda_utils.  Upload to S3.

    Uses the lambda build server (an Amazon Linux AMI) to compile C code and
    prepare the virtualenv that's ultimately contained in the zip file placed
    in S3.

    Args:
        session (Session): boto3.Session
        domain (string): The VPC's domain name such as integration.boss.
        bucket (string): Name of the S3 bucket the packaged lambda code is uploaded to.
    """
    # NamedTemporaryFile is used only to reserve a unique name; the file is
    # deleted on close and the actual zip is created at <name>.zip below
    tempname = tempfile.NamedTemporaryFile(delete=True)
    zipname = tempname.name + '.zip'
    tempname.close()
    print('Using temp zip file: ' + zipname)

    cwd = os.getcwd()
    os.chdir(const.repo_path("salt_stack", "salt", "spdb", "files"))
    zip.write_to_zip('spdb.git', zipname, False)
    os.chdir(cwd)

    os.chdir(const.repo_path("salt_stack", "salt", "boss-tools", "files", "boss-tools.git"))
    zip.write_to_zip('bossutils', zipname)
    zip.write_to_zip('lambda', zipname)
    zip.write_to_zip('lambdautils', zipname)
    os.chdir(cwd)

    with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl:
        # Generate settings.ini file for ndingest.
        create_ndingest_settings(domain, tmpl)

    os.chdir(const.repo_path("salt_stack", "salt", "ndingest", "files"))
    zip.write_to_zip('ndingest.git', zipname)
    os.chdir(cwd)

    print("Copying local modules to lambda-build-server")

    # Copy the zip file to the lambda build server
    lambda_build_server = aws.get_lambda_server(session)
    lambda_build_server_key = aws.get_lambda_server_key(session)
    lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key)
    ssh = SSHConnection(lambda_build_server_key, (lambda_build_server, 22, 'ec2-user'))
    target_file = "sitezips/{}.zip".format(domain)
    ret = ssh.scp(zipname, target_file, upload=True)
    print("scp return code: " + str(ret))

    os.remove(zipname)

    # This section will run makedomainenv on lambda-build-server
    print("calling makedomainenv on lambda-build-server")
    cmd = 'source /etc/profile && source ~/.bash_profile && /home/ec2-user/makedomainenv {} {}'.format(domain, bucket)
    ssh.cmd(cmd)
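
The `zip` module used above is a project helper, not the stdlib. A rough stand-in built on zipfile, assuming the signature write_to_zip(path, zip_path, compress=True, arcname=None); under that assumption the bare False passed for spdb.git would disable compression, but that reading is a guess:

import os
import zipfile

def write_to_zip(path, zip_path, compress=True, arcname=None):
    # Append a file or a directory tree to zip_path, creating the archive if needed
    mode = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    arcname = arcname or os.path.basename(path)
    with zipfile.ZipFile(str(zip_path), 'a', compression=mode) as zf:
        if os.path.isdir(path):
            for root, _, files in os.walk(path):
                for name in files:
                    full = os.path.join(root, name)
                    # Keep files under arcname inside the archive
                    zf.write(full, os.path.join(arcname, os.path.relpath(full, path)))
        else:
            zf.write(path, arcname)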
Example #3
def update_migrate(bosslet_config, config):
    migration_progress = constants.repo_path("cloud_formation", "configs", "migrations", config, "progress")

    if not os.path.exists(migration_progress):
        console.info("No migrations to apply")
        return

    with open(migration_progress, "r") as fh:
        cur_ver = int(fh.read())

    new_ver = CloudFormationConfiguration(config, bosslet_config).existing_version()

    migrations = MigrationManager(config, cur_ver, new_ver)
    if not migrations.has_migrations:
        console.info("No migrations to apply")
        os.remove(migration_progress)
        return

    def callback(migration_file):
        with open(migration_progress, 'w') as fh:
            fh.write(str(migration_file.stop))

    migrations.add_callback(post=callback)

    migrations.post_update(bosslet_config)

    os.remove(migration_progress)
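
update_migrate only touches a small surface of MigrationManager; a minimal sketch of that contract, with a hypothetical MigrationFile carrying the start/stop versions the progress callback records (names beyond those used above are invented):

class MigrationFile:
    """Hypothetical stand-in for a single migration script."""
    def __init__(self, start, stop, apply_fn):
        self.start = start      # config version this migration upgrades from
        self.stop = stop        # config version this migration upgrades to
        self.apply_fn = apply_fn

def discover_migrations(config):
    # Hypothetical loader; the real project presumably reads the scripts under
    # cloud_formation/configs/migrations/<config>/
    return []

class MigrationManager:
    def __init__(self, config, cur_ver, new_ver):
        # Keep only the migrations needed to move from cur_ver to new_ver
        self.migrations = sorted((m for m in discover_migrations(config)
                                  if cur_ver <= m.start and m.stop <= new_ver),
                                 key=lambda m: m.start)
        self.post_callbacks = []

    @property
    def has_migrations(self):
        return bool(self.migrations)

    def add_callback(self, post):
        self.post_callbacks.append(post)

    def post_update(self, bosslet_config):
        for migration in self.migrations:
            migration.apply_fn(bosslet_config)
            # Persist progress after every step so an interrupted run can resume
            for cb in self.post_callbacks:
                cb(migration)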
Example #4
def load_scenarios():
    scenarios = [
        normalize(s)
        for s in glob(repo_path('cloud_formation', 'scenarios', '*.yml'))
    ]

    return scenarios
Example #5
def load_cf_configs():
    configs = [
        normalize(c)
        for c in glob(repo_path('cloud_formation', 'configs', '*.py'))
    ]
    configs.append('all')
    configs.remove('__init__')

    return configs
Example #6
def load_lambda_config(lambda_dir):
    """Load the lambda.yml config file

    Args:
        lambda_dir (str): Name of the directory under cloud_formation/lambda/ that
                          contains the lambda.yml file to load

    Returns:
        dict: Dictionary of configuration file data
    """
    lambda_config = const.repo_path('cloud_formation', 'lambda', lambda_dir, 'lambda.yml')
    with open(lambda_config, 'r') as fh:
        return yaml.full_load(fh.read())
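
Piecing together the keys that the build code in Example #16 below actually reads (runtime, layers, include), a lambda.yml plausibly looks like the sample parsed here; the schema is inferred from usage and the concrete values are made up:

import yaml

sample = """
runtime: python3.7
layers:
  - pillow-layer
include:
  lib/names.py: bossnames/names.py
"""

config = yaml.full_load(sample)
print(config['runtime'])                  # selects the lambci build container tag
for layer in config.get('layers', []):
    print('layer:', layer)                # each layer is built as its own code zip
for src, dst in config.get('include', {}).items():
    print(src, '->', dst)                 # extra files copied into the zip under dst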
Example #7
def pre_init(bosslet_config):
    """
    Create NodeJS config file from template.  
    Package NodeJS lambda function.
    Upload .zip to S3 bucket.
    """
    write_config_file(bosslet_config)
    copy_provisioners(bosslet_config)

    # Inject the current region into the lambda code
    region_json = const.repo_path('cloud_formation', 'lambda',
                                  'dynamodb-lambda-autoscale', 'src',
                                  'configuration', 'Region.json')
    with open(region_json, 'w') as fh:
        json.dump({'Region': bosslet_config.REGION}, fh)

    lambdas.load_lambdas_on_s3(
        bosslet_config,
        bosslet_config.names.dynamo_lambda.lambda_,
    )
Example #8
import os
import pprint
import datetime
import json

from boto3 import Session
from botocore.exceptions import ClientError

import alter_path
from lib import aws
from lib import utils
from lib import hosts
from lib import constants as const
from lib.boto_wrapper import IamWrapper as iw

IAM_CONFIG_DIR = const.repo_path("config", "iam")
DEFAULT_POLICY_FILE = os.path.join(IAM_CONFIG_DIR, "policies.json")
DEFAULT_GROUP_FILE = os.path.join(IAM_CONFIG_DIR, "groups.json")
DEFAULT_ROLES_FILE = os.path.join(IAM_CONFIG_DIR, "roles.json")
COMMANDS = ["import", "export"]

class IamUtils:
    def __init__(self, session):
        self.session = session
        self.iam_details = None
        self.iw = iw(session.client("iam"))
        self.policy_keyword_filters = ["-client-policy-"]  # Policies whose names contain any of these keywords are skipped.
        self.policy_whole_filters = ["gion-test-policy", "aplAllowAssumeRoleInProduction",
                                     "aplDenyAssumeRoleInProduction", "aplSpeedTestPolicy", "drenkng1-agents-rw-policy"]
        self.role_keyword_filters = ["cowlehp1"]
        self.role_whole_filters = ["drenkng1-agents-rw-policy", "alexaSkillsRole", "ecrReadOnlyRole", "ecsInstanceRole",
Example #9
def load_lambdas_on_s3(session, domain, bucket):
    """Zip up spdb, bossutils, lambda and lambda_utils.  Upload to S3.

    Uses the lambda build server (an Amazon Linux AMI) to compile C code and
    prepare the virtualenv that's ultimately contained in the zip file placed
    in S3.

    Args:
        session (Session): boto3.Session
        domain (string): The VPC's domain name such as integration.boss.
        bucket (string): Name of the S3 bucket the packaged lambda code is uploaded to.
    """
    tempname = tempfile.NamedTemporaryFile(delete=True)
    zipname = tempname.name + '.zip'
    tempname.close()
    print('Using temp zip file: ' + zipname)

    cwd = os.getcwd()
    os.chdir(const.repo_path("salt_stack", "salt", "spdb", "files"))
    zip.write_to_zip('spdb.git', zipname, False)
    os.chdir(cwd)

    os.chdir(const.repo_path("salt_stack", "salt", "boss-tools", "files", "boss-tools.git"))
    zip.write_to_zip('bossutils', zipname)
    zip.write_to_zip('lambda', zipname)
    zip.write_to_zip('lambdautils', zipname)
    os.chdir(cwd)

    with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl:
        # Generate settings.ini file for ndingest.
        create_ndingest_settings(domain, tmpl)

    os.chdir(const.repo_path("salt_stack", "salt", "ndingest", "files"))
    zip.write_to_zip('ndingest.git', zipname)
    os.chdir(cwd)

    os.chdir(const.repo_path("lib"))
    zip.write_to_zip('heaviside.git', zipname)

    # Let lambdas look up names by creating a bossnames module.
    zip.write_to_zip('names.py', zipname, arcname='bossnames/names.py')
    zip.write_to_zip('hosts.py', zipname, arcname='bossnames/hosts.py')
    zip.write_to_zip('bucket_object_tags.py', zipname, arcname='bossnames/bucket_object_tags.py')
    zip.write_to_zip('__init__.py', zipname, arcname='bossnames/__init__.py')
    os.chdir(cwd)

    print("Copying local modules to lambda-build-server")

    # Copy the zip file to the lambda build server
    lambda_build_server = aws.get_lambda_server(session)
    lambda_build_server_key = aws.get_lambda_server_key(session)
    lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key)
    ssh = SSHConnection(lambda_build_server_key, (lambda_build_server, 22, 'ec2-user'))
    target_file = "sitezips/{}.zip".format(domain)
    ret = ssh.scp(zipname, target_file, upload=True)
    print("scp return code: " + str(ret))

    os.remove(zipname)

    # This section will run makedomainenv on lambda-build-server
    print("calling makedomainenv on lambda-build-server")
    cmd = 'source /etc/profile && source ~/.bash_profile && /home/ec2-user/makedomainenv {} {}'.format(domain, bucket)
    ssh.cmd(cmd)
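
The arcname remapping above effectively fabricates a `bossnames` package inside the zip, so code running in the lambda can presumably import the shared name helpers like this (illustrative):

# Inside a lambda handler shipped in this zip
from bossnames import names, hosts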
Example #10
# This was an attempt to import CUBOIDSIZE from the spdb repo.  Can't import
# without compiling spdb's C library.
#
#SPDB_FOLDER = '../salt_stack/salt/spdb/files'
#SPDB_REPO = os.path.normpath(os.path.join(cur_dir, SPDB_FOLDER + '/spdb.git'))
#SPDB_LINK = os.path.normpath(os.path.join(cur_dir, SPDB_FOLDER + '/spdb'))
# try:
#     os.symlink(SPDB_REPO, SPDB_LINK, True)
#     spdb_dir = os.path.normpath(os.path.join(cur_dir, SPDB_FOLDER))
#     sys.path.append(spdb_dir)
#     from spdb.c_lib.c_version.ndtype import CUBOIDSIZE
# finally:
#     os.remove(SPDB_LINK)

# Location of settings files for ndingest.
NDINGEST_SETTINGS_FOLDER = const.repo_path('salt_stack', 'salt', 'ndingest', 'files', 'ndingest.git', 'settings')

# Template used for ndingest settings.ini generation.
NDINGEST_SETTINGS_TEMPLATE = NDINGEST_SETTINGS_FOLDER + '/settings.ini.apl'
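
create_ndingest_settings is called in several examples but never shown. A plausible sketch, assuming the template carries placeholders that get filled with stack-specific values and the result lands next to the template as settings.ini (the placeholder name is invented; note the newer Example #16 passes a bosslet_config instead of a domain):

def create_ndingest_settings(domain, fh):
    # Fill stack-specific values into the template and write the result where
    # the zipped ndingest.git expects to find settings.ini
    settings = fh.read().replace('{domain}', domain)
    with open(NDINGEST_SETTINGS_FOLDER + '/settings.ini', 'w') as out:
        out.write(settings)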

def get_lambda_zip_name(domain):
    """Get name of zip file containing lambda.

    This must match the name created in the makedomainenv script that runs on
    the lambda build server.

    Args:
        domain (string): The VPC's domain name such as integration.boss.

    Returns:
        (string)
Example #11
"""

import argparse
import sys
import os
import glob
import json
import shlex
import subprocess
from distutils.spawn import find_executable
from boto3.session import Session

import alter_path
from lib.constants import repo_path

os.environ["PATH"] += ":" + repo_path("bin") # allow executing Packer from the bin/ directory

def get_commit():
    """Figure out the commit hash of the current git revision.
        Note: Only works if the CWD is a git repository
    Returns:
        (string) : The git commit hash
    """
    cmd = "git rev-parse HEAD"
    result = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE)
    return result.stdout.decode("utf-8").strip()
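
get_commit never checks git's return code, so outside a git repository it quietly returns an empty string; a usage sketch that fails fast instead:

commit = get_commit()
if not commit:
    raise RuntimeError('git rev-parse failed; not inside a git repository?')
print('building from commit ' + commit)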

def execute(cmd, output_file):
    """Execuit the given command and redirect STDOUT and STDERR to output_file.
    Args:
        cmd (string) : Command to execute
Example #13
import os

from boto3 import Session
from botocore.exceptions import ClientError

import alter_path
from lib import aws
from lib import utils
from lib import constants as const
from lib.boto_wrapper import IamWrapper
from lib import configuration
from lib import console

REGION_STANDIN = '==region=='
ACCOUNT_STANDIN = '==account=='

IAM_CONFIG_DIR = const.repo_path("config", "iam")
DEFAULT_POLICY_FILE = os.path.join(IAM_CONFIG_DIR, "policies.json")
DEFAULT_GROUP_FILE = os.path.join(IAM_CONFIG_DIR, "groups.json")
DEFAULT_ROLES_FILE = os.path.join(IAM_CONFIG_DIR, "roles.json")
DEFAULT_NAMES_FILE = os.path.join(IAM_CONFIG_DIR, "names.json")
DEFAULT_REMOVED_FILE = os.path.join(IAM_CONFIG_DIR, "removed.json")


class AllResources(object):
    """Class that always returns True for any `obj in AllResources()`"""
    def __contains__(self, value):
        return True
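
AllResources lets membership tests succeed for everything, so filtering code can treat "no whitelist configured" and "explicit whitelist" uniformly; a quick illustration:

allowed = AllResources()          # stand-in for "keep everything"
# allowed = {'roleA', 'roleB'}    # or an explicit whitelist
for role in ('roleA', 'roleC'):
    if role in allowed:
        print('importing', role)  # with AllResources(), nothing is filtered out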


def json_dumps(obj):
    """A wrapper to `json.dumps` that provides the common arguments used
Example #15
    Starting ASG that already is on - 
    Initializing Vault - 
    Unsealing Vault - 
    Exported Vault data not existing - 
    Importing Vault data - 


Stopping:
    An error when stopping ASG that is on - 
    Stopping ASG that is already off - 
    Exporting Vault data - 
    Stopping Auth without RDS - 
"""
KEY_PREFIX = 'boss_switch.'

VAULT_FILE = constants.repo_path('vault', 'private', '{}', 'export.json')


def load_aws(bosslet_config, method):
    suffix = '.' + bosslet_config.INTERNAL_DOMAIN

    client = bosslet_config.session.client('autoscaling')
    response = client.describe_auto_scaling_groups()

    def name(tags):
        for tag in tags:
            if tag['Key'] == 'Name':
                return tag['Value']
        return ""

    asgs = [
Example #16
def load_lambdas_on_s3(bosslet_config, lambda_name = None, lambda_dir = None):
    """Package up the lambda files and send them through the lambda build process
    where the lambda code zip is produced and uploaded to S3

    NOTE: This function is also used to build lambda layer code zips; the only
          requirement for a layer is that the files in the resulting zip are in the
          correct subdirectory (`python/` for Python libraries) so that when a lambda
          uses the layer the included libraries can be loaded correctly

    NOTE: If lambda_name and lambda_dir are both None then lambda_dir is set to
          'multi_lambda' for backwards compatibility

    Args:
        bosslet_config (BossConfiguration): Configuration object of the stack the
                                            lambda will be deployed into
        lambda_name (str): Name of the lambda, which will be mapped to the name of the
                           lambda directory that contains the lambda's code
        lambda_dir (str): Name of the directory in `cloud_formation/lambda/` that
                          contains the `lambda.yml` configuration file for the lambda

    Raises:
        BossManageError: If there was a problem with building the lambda code zip or
                         uploading it to the given S3 bucket
    """
    # For backwards compatibility build the multi_lambda code zip
    if lambda_name is None and lambda_dir is None:
        lambda_dir = 'multi_lambda'

    # Map from lambda_name to lambda_dir if needed
    if lambda_dir is None:
        try:
            lambda_dir = lambda_dirs(bosslet_config)[lambda_name]
        except KeyError:
            console.error("Cannot build a lambda that doesn't use a code zip file")
            return None

    # To prevent rebuilding a lambda code zip multiple times during a single
    # execution, memoize which ones have already been built
    if lambda_dir in BUILT_ZIPS:
        console.debug('Lambda code {} has already been built recently, skipping...'.format(lambda_dir))
        return
    BUILT_ZIPS.append(lambda_dir)

    lambda_dir = pathlib.Path(const.repo_path('cloud_formation', 'lambda', lambda_dir))
    lambda_config = lambda_dir / 'lambda.yml'
    with lambda_config.open() as fh:
        lambda_config = yaml.full_load(fh.read())

    if lambda_config.get('layers'):
        for layer in lambda_config['layers']:
            # Layer names should end with `layer`
            if not layer.endswith('layer'):
                console.warning("Layer '{}' doesn't conform to naming conventions".format(layer))

            load_lambdas_on_s3(bosslet_config, lambda_dir=layer)

    console.debug("Building {} lambda code zip".format(lambda_dir))

    domain = bosslet_config.INTERNAL_DOMAIN
    tempname = tempfile.NamedTemporaryFile(delete=True)
    zipname = pathlib.Path(tempname.name + '.zip')
    tempname.close()
    console.debug('Using temp zip file: {}'.format(zipname))

    cwd = os.getcwd()

    # Copy the lambda files into the zip
    for filename in lambda_dir.glob('*'):
        zip.write_to_zip(str(filename), zipname, arcname=filename.name)

    # Copy the other files that should be included
    if lambda_config.get('include'):
        for src in lambda_config['include']:
            dst = lambda_config['include'][src]
            src_path, src_file = src.rsplit('/', 1)

            os.chdir(const.repo_path(src_path))

            # Generate dynamic configuration files, as needed
            if src_file == 'ndingest.git':
                with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl:
                    # Generate settings.ini file for ndingest.
                    create_ndingest_settings(bosslet_config, tmpl)

            zip.write_to_zip(src_file, zipname, arcname=dst)
            os.chdir(cwd)

    # Currently any Docker CLI compatible container setup can be used (like podman)
    CONTAINER_CMD = '{EXECUTABLE} run --rm -it --env AWS_* --volume {HOST_DIR}:/var/task/ lambci/lambda:build-{RUNTIME} {CMD}'

    BUILD_CMD = 'python3 {PREFIX}/build_lambda.py {DOMAIN} {BUCKET}'
    BUILD_ARGS = {
        'DOMAIN': domain,
        'BUCKET': bosslet_config.LAMBDA_BUCKET,
    }

    # DP NOTE: not sure if this should be in the bosslet_config, as it is more about the local dev
    #          environment than the stack's environment. Different maintainers may have
    #          different container commands installed.
    container_executable = os.environ.get('LAMBDA_BUILD_CONTAINER')
    lambda_build_server = bosslet_config.LAMBDA_SERVER
    if lambda_build_server is None:
        staging_target = pathlib.Path(const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files', 'staging'))
        if not staging_target.exists():
            staging_target.mkdir()

        console.debug("Copying build zip to {}".format(staging_target))
        staging_zip = staging_target / (domain + '.zip')
        try:
            zipname.rename(staging_zip)
        except OSError:
            # rename only works within the same filesystem, so fall back to the
            # shell's mv; copy + chmod doesn't always work, depending on the filesystem
            utils.run('mv {} {}'.format(zipname, staging_zip), shell=True)

        # Provide the AWS Region and Credentials (for S3 upload) via environmental variables
        env_extras = { 'AWS_REGION': bosslet_config.REGION,
                       'AWS_DEFAULT_REGION': bosslet_config.REGION }

        if container_executable is None:
            BUILD_ARGS['PREFIX'] = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files')
            CMD = BUILD_CMD.format(**BUILD_ARGS)

            if bosslet_config.PROFILE is not None:
                env_extras['AWS_PROFILE'] = bosslet_config.PROFILE

            console.info("calling build lambda on localhost")
        else:
            BUILD_ARGS['PREFIX'] = '/var/task'
            CMD = BUILD_CMD.format(**BUILD_ARGS)
            CMD = CONTAINER_CMD.format(EXECUTABLE = container_executable,
                                       HOST_DIR = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files'),
                                       RUNTIME = lambda_config['runtime'],
                                       CMD = CMD)

            if bosslet_config.PROFILE is not None:
                # Cannot set the profile as the container will not have the credentials file
                # So extract the underlying keys and provide those instead
                creds = bosslet_config.session.get_credentials()
                env_extras['AWS_ACCESS_KEY_ID'] = creds.access_key
                env_extras['AWS_SECRET_ACCESS_KEY'] = creds.secret_key

            console.info("calling build lambda in {}".format(container_executable))

        try:
            utils.run(CMD, env_extras=env_extras)
        except Exception as ex:
            raise BossManageError("Problem building {} lambda code zip: {}".format(lambda_dir, ex))
        finally:
            os.remove(staging_zip)

    else:
        BUILD_ARGS['PREFIX'] = '~'
        CMD = BUILD_CMD.format(**BUILD_ARGS)

        lambda_build_server_key = bosslet_config.LAMBDA_SERVER_KEY
        lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key)
        ssh_target = SSHTarget(lambda_build_server_key, lambda_build_server, 22, 'ec2-user')
        bastions = [bosslet_config.outbound_bastion] if bosslet_config.outbound_bastion else []
        ssh = SSHConnection(ssh_target, bastions)

        console.debug("Copying build zip to lambda-build-server")
        target_file = '~/staging/{}.zip'.format(domain)
        ret = ssh.scp(zipname, target_file, upload=True)
        console.debug("scp return code: " + str(ret))

        os.remove(zipname)

        console.info("calling build lambda on lambda-build-server")
        ret = ssh.cmd(CMD)
        if ret != 0:
            raise BossManageError("Problem building {} lambda code zip: Return code: {}".format(lambda_dir, ret))
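
A usage sketch of this newer interface, covering the three calling modes the docstring describes (bosslet_config is whatever lib.configuration yields for the target stack; the lambda and layer names are hypothetical):

# Backwards compatible default: builds the combined multi_lambda code zip
load_lambdas_on_s3(bosslet_config)

# By lambda name, mapped to its directory via lambda_dirs()
load_lambdas_on_s3(bosslet_config, lambda_name='index_writer')

# By directory, which is also how layer zips are built recursively
load_lambdas_on_s3(bosslet_config, lambda_dir='pillow-layer')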
"""

import boto3
import configparser
import io
from lib.cloudformation import CloudFormationConfiguration, Arg, Arn, Ref
from lib import aws
from lib import constants as const
from lib import lambdas
import os
import json
import shutil
import subprocess

# Location of repo with the lambda autoscaler.
LAMBDA_ROOT_FOLDER = const.repo_path('cloud_formation', 'lambda',
                                     'dynamodb-lambda-autoscale')

CONFIG_OUTPUT_PATH = os.path.join(LAMBDA_ROOT_FOLDER, 'config.env.production')

CONFIG_INPUT_DIR = const.repo_path('cloud_formation', 'dynamodb-autoscale')
CONFIG_OUTPUT_DIR = os.path.join(LAMBDA_ROOT_FOLDER, 'src', 'configuration')
PROVISIONER_FILENAME = 'BossProvisioners.json'

DYNAMO_LAMBDA_KEY = 'DynamoLambda'
TRIGGER_KEY = 'TriggerDynamoAutoscale'


def create_config(bosslet_config):
    session = bosslet_config.session
    names = bosslet_config.names
    config = CloudFormationConfiguration("dynamolambda", bosslet_config)
Example #18
def export_path(bosslet_config):
    return const.repo_path('vault', 'private', bosslet_config.names.vault.dns,
                           'export.json')
Example #19
import os

import alter_path
from lib.constants import repo_path
from lib import configuration
from lib import utils

CONFIGS = [
    "activities",
    "auth",
    "backup",
    "cachemanager",
    "endpoint",
    "vault",
]

os.environ["PATH"] += ":" + repo_path("bin") # allow executing Packer from the bin/ directory

def lambda_ami():
    # Amazon Linux AMI 2018.03.0 (HVM), SSD Volume Type
    # Should match runtime used by AWS Lambda
    #    https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html
    # NOTE: The boss-manage code assumes that this will be an Amazon AMI
    #       that uses the 'ec2-user' user account
    return "ami-0080e4c5bc078760e"

def get_commit():
    """Figure out the commit hash of the current git revision.
        Note: Only works if the CWD is a git repository
    Returns:
        (string) : The git commit hash
    """