Example #1
def dynamic_jinja_to_static_ruamel_yaml(filename):
    tmp_file = 'tmp.ruamel_yaml'

    converted_file_string, compilers_in_build = convert_jinja_syntax(filename)

    # Round-trip the converted recipe through ruamel.yaml to normalize it,
    # then write it out so Jinja can pick it up as a template
    data = ruamel_yaml.round_trip_load(converted_file_string)
    with open(tmp_file, 'w') as fp:
        ruamel_yaml.round_trip_dump(data, fp)

    # Non-default delimiters keep conda's own {{ }} / {% %} markers intact
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath='.'),
        trim_blocks=True,
        block_start_string='#%',
        block_end_string='%#',
        variable_start_string='<{',
        variable_end_string='}>')

    try:
        # render() returns the recipe as a string
        rendered_recipe = environment.get_template(tmp_file).render()
        return rendered_recipe, compilers_in_build
    except Exception:
        print("ERROR in dynamic_jinja_to_static_ruamel_yaml")
        return "", []
    finally:
        # Clean up the temporary file whether or not rendering succeeded
        if os.path.exists(tmp_file):
            os.remove(tmp_file)
Example #2
def yaml_round_trip_dump(obj):
    """Dump a Python object to a block-style YAML string."""
    # 'obj' instead of 'object' avoids shadowing the built-in
    return yaml.round_trip_dump(
        obj, block_seq_indent=2, default_flow_style=False, indent=2
    )
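A hypothetical call to the wrapper above (assuming yaml was imported as ruamel.yaml); nested data comes back as block-style YAML with two-space indents:

print(yaml_round_trip_dump({"build": {"number": 0, "skip": True}}))
# build:
#   number: 0
#   skip: true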
Example #3
def write_template(method, image, command, workdir, parallel, **kwargs):
    with open(
            f"{os.path.dirname(os.path.realpath(__file__))}/kubernetes-template.yaml"
    ) as ifile:
        doc = ruamel_yaml.round_trip_load(ifile, preserve_quotes=True)

        # Default to False rather than the string 'OFF' (a non-empty string
        # is truthy and would flip the ON/OFF checks below)
        double = kwargs.get('double', False)
        rdtscp = kwargs.get('rdtscp', False)
        orca_method_file = kwargs.get('orca_method_file', '')
        arch = kwargs.get('arch', '')

        # Set default values
        default_image = GMX_IMAGE
        default_name = "gromacs"
        if method == "orca":
            default_image = ORCA_IMAGE
            default_name = "orca"

        # Replace "_" with "-" because "_" is not a valid character in a
        # Kubernetes resource name
        method = method.replace("_", "-")

        # Set names
        timestamp = str(time.time()).replace(".", "")
        identificator = "{}-{}-rdtscp-{}".format(default_name, method,
                                                 timestamp)
        doc['metadata']['name'] = identificator
        doc['spec']['template']['spec']['containers'][0][
            'name'] = "{}-{}-deployment-{}".format(default_name, method,
                                                   timestamp)
        doc['spec']['template']['metadata']['labels']['app'] = identificator

        # Set container args (the command is run via /bin/bash -c)
        doc['spec']['template']['spec']['containers'][0]['args'] = [
            "/bin/bash", "-c",
            DoubleQuotedScalarString(command)
        ]

        # If not orca, set options for gmx container
        if method != "orca":
            double_env = {
                'name': "GMX_DOUBLE",
                'value': DoubleQuotedScalarString("ON" if double else "OFF")
            }
            rdtscp_env = {
                'name': "GMX_RDTSCP",
                'value': DoubleQuotedScalarString("ON" if rdtscp else "OFF")
            }
            arch_env = {
                'name': "GMX_ARCH",
                'value': DoubleQuotedScalarString(arch)
            }
            doc['spec']['template']['spec']['containers'][0]['env'] = [
                double_env, rdtscp_env, arch_env
            ]

        # If parallel is enabled, set a shared label so `kubectl logs` can
        # select all pods in the batch by that label
        if parallel:
            with open(f"{PICKLE_PATH}/lock.pkl", "rb") as fp:
                lock_object = pickle.load(fp)
            if len(lock_object['Parallel_label']) == 0:
                label = {"Parallel_label": identificator, "Count": 0}
                with open(f"{PICKLE_PATH}/lock.pkl", "wb") as fp:
                    pickle.dump(label, fp)
            else:
                doc['spec']['template']['metadata']['labels'][
                    'app'] = lock_object['Parallel_label']

        # Set image
        doc['spec']['template']['spec']['containers'][0][
            'image'] = default_image if not image else image

        # Set working directory
        doc['spec']['template']['spec']['containers'][0][
            'workingDir'] = "/tmp/"
        if workdir:
            doc['spec']['template']['spec']['containers'][0][
                'workingDir'] += workdir

        # Set the PVC; PVC_NAME must be provided as an environment variable
        pvc_name = os.environ.get('PVC_NAME', '')
        if len(pvc_name) == 0:
            raise RuntimeError(
                "Error setting pvc_name: the PVC_NAME environment variable is "
                "not set in the current container"
            )
        doc['spec']['template']['spec']['volumes'][0]['persistentVolumeClaim'][
            'claimName'] = pvc_name

        # Set orca required cpus
        if method == "orca":
            no_of_procs = get_no_of_procs(orca_method_file)
            if no_of_procs != -1:
                doc['spec']['template']['spec']['containers'][0]['resources'][
                    'requests']['cpu'] = no_of_procs

        # Write to file
        ofile_name = "{}-{}-rdtscp.yaml".format(default_name, method)
        with open(ofile_name, "w") as ofile:
            ruamel_yaml.round_trip_dump(doc, ofile, explicit_start=True)

        return ofile_name, identificator
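One detail worth isolating from the example above: DoubleQuotedScalarString forces double quotes onto specific scalars when they are dumped, which plain strings would not get. A minimal sketch:

import ruamel.yaml as ruamel_yaml
from ruamel.yaml.scalarstring import DoubleQuotedScalarString

doc = {
    'plain': 'gromacs',                             # dumped unquoted
    'forced': DoubleQuotedScalarString('gromacs'),  # dumped as "gromacs"
}
print(ruamel_yaml.round_trip_dump(doc), end="")
# plain: gromacs
# forced: "gromacs"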
Example #4
def make_meta_file_from_dict(recipe_dict, path_to_meta_file):
    with open(path_to_meta_file, "w") as meta_file:
        ruamel_yaml.round_trip_dump(recipe_dict, meta_file)
    # Remove the quotes that round_trip_dump added around
    # {{ compiler(...) }} expressions
    remove_quotes_in_yaml(path_to_meta_file)
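The quotes that remove_quotes_in_yaml (a project-specific helper, not shown here) strips out appear because a scalar starting with { would otherwise parse as a YAML flow mapping, so the emitter quotes it. A small sketch of the effect:

import ruamel.yaml as ruamel_yaml

data = ruamel_yaml.round_trip_load("build:\n- gcc\n")
data['build'][0] = "{{ compiler('c') }}"
print(ruamel_yaml.round_trip_dump(data), end="")
# build:
# - '{{ compiler(''c'') }}'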
Example #5
def handler(event, context):
    s3 = boto3.resource('s3')
    exists = True
    success = False

    try:
        print("Trying to download this file...")
        s3.Bucket('name-of-s3-bucket').download_file('main_prod.yml',
                                                     '/tmp/main_prod.yml')
        # The S3 buckets can be the same or different, depending on where the
        # relevant files live
        s3.Bucket('name-of-s3-bucket').download_file('new_params.yml',
                                                     '/tmp/new_params.yml')
        print("Success")
    except botocore.exceptions.ClientError as e:
        # If a client error is thrown, check whether it was a 404;
        # a 404 means the requested object does not exist
        error_code = int(e.response['Error']['Code'])
        print(e.response['Error'])
        if error_code == 404:
            exists = False

    # Load file from local source
    with open('/tmp/main_prod.yml') as stream:
        main_prod = yaml.safe_load(stream)
    print("Opened local /tmp/main_prod.yml file")
    # Load new ImageIds file
    with open('/tmp/new_params.yml') as stream2:
        new_params = yaml.safe_load(stream2)
    print("Opened local /tmp/new_params.yml file")

    # Function for finding the 'ImageId' in nested dictionary
    def findImageId(d):
        # Create empty list
        temp = []
        # For every role
        for i in d['RoleParams']:
            # Find the 'ImageId' property
            if 'ImageId' in d['RoleParams'][i]:
                # Output both the role name and the value of the 'ImageId' associated with it
                temp.append(i)
                temp.append(d['RoleParams'][i]['ImageId'])
        return temp

    # Lists all roles and associated AMI IDs
    originalIds = findImageId(main_prod)
    print("Found the Image Ids for main_prod")
    # Lists all roles that need updating (which should be all entries)
    newIds = findImageId(new_params)
    print("Found the Image Ids for new_params")

    # Convert the flat [role, id, role, id, ...] lists to dictionaries by
    # zipping each iterator with itself
    i = iter(newIds)
    newIds = dict(zip(i, i))
    print("Made the new_params list into a dict")

    j = iter(originalIds)
    # Rename to 'updatedIds' for clarity
    updatedIds = dict(zip(j, j))
    print("Made the original list into a new dict ready for updating")

    # Change the AMI IDs for those that need updating
    # For every role in the updatedIds dictionary
    for i in updatedIds:
        # If newIds has an entry for the same role, take its ID
        if i in newIds:
            updatedIds[i] = newIds[i]

    print("IDs successfully updated")

    # Update the new ImageIds and write them back to the file.
    # lygi is ruamel.yaml.util.load_yaml_guess_indent (aliased at import): it
    # returns the data plus the detected indent and block-sequence indent of
    # the original YAML file
    with open('/tmp/main_prod.yml') as stream3:
        config, ind, bsi = lygi(stream3)
    print("Setting indentation ready for updating YAML file...")

    # Data inside the 'RoleParams' key (i.e. ignoring 'BaseParams')
    RoleParams = config['RoleParams']
    print("Let's ignore the BaseParams section to update")

    # For every role, update the ImageIds
    for i in RoleParams:
        # If this role has an updated ID, apply it
        if i in updatedIds:
            RoleParams[i]['ImageId'] = updatedIds[i]

    print("We have updated the IDs whilst keeping the YAML structure intact")

    # Write to a new YAML file, preserving the detected indentation
    with open('/tmp/main_prod.yml', 'w') as out:
        ryaml.round_trip_dump(config, out, indent=ind, block_seq_indent=bsi)
    print("Saved the new YAML file locally as /tmp/main_prod.yml")

    # Upload the updated file to S3
    try:
        s3.meta.client.upload_file('/tmp/main_prod.yml', 'name-of-s3-bucket',
                                   'output.yml')
        print("Uploaded the file to S3 bucket!")
        success = True
    except botocore.exceptions.ClientError as e:
        # If a client error is thrown, check whether it was a 404;
        # a 404 here means the target bucket does not exist
        error_code = int(e.response['Error']['Code'])
        print(e.response['Error'])
        success = False
        if error_code == 404:
            exists = False

    return success
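The list-to-dict step in the handler above relies on pairing consecutive elements by zipping an iterator with itself (Python 3's built-in zip replaces Python 2's itertools.izip). In isolation, with hypothetical role/AMI values:

pairs = ['web', 'ami-111aaa', 'db', 'ami-222bbb']
it = iter(pairs)
print(dict(zip(it, it)))
# {'web': 'ami-111aaa', 'db': 'ami-222bbb'}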