Example #1
def tacc_jobs():

    # Ensures the request body is valid JSON
    if not request.is_json:
        return "INVALID: Request is not JSON"

    proposal = request.get_json()

    # .get avoids a KeyError when "token" is absent (required fields are checked below)
    if not pp.token_test(proposal.get("token")):
        return "Invalid token"

    # Checks the required fields
    req_fields = ["token", "image", "commands", "priority", "username"]
    req_check = l2_contains_l1(req_fields, proposal.keys())

    if req_check:
        return "INVALID: Missing the following JSON fields: " + ",".join(
            str(a) for a in req_check)

    TOKEN, IMAGE, COMMANDS, PRIORITY, USERNAME = (
        proposal["token"], proposal["image"], proposal["commands"],
        proposal["priority"], proposal["username"]
    )
    VolCon_ID = uuid.uuid4().hex

    if "gpu" in IMAGE:
        GPU = 1
    else:
        GPU = 0

    try:
        mints.add_job(TOKEN,
                      IMAGE,
                      COMMANDS,
                      GPU,
                      VolCon_ID,
                      PRIORITY,
                      username=USERNAME)
    except Exception:
        return "INVALID: Could not connect to MySQL database"

    # TACC: Image is a TACC image
    job_info = {
        "Image": IMAGE,
        "Command": COMMANDS,
        "TACC": 1,
        "GPU": GPU,
        "VolCon_ID": VolCon_ID,
        "public": 1
    }

    try:
        mirror.upload_job_to_mirror(job_info)
    except Exception:
        return "No VolCon mirrors are connected to this project"

    return "Successfully submitted job"
Example #2
def new_job():

    if not request.is_json:
        return "POST parameters could not be parsed"

    ppr = request.get_json()
    ppr_keys = ppr.keys()
    check = l2_contains_l1(["key", "ID", "User", "origin", "Job", "modules", "output_files", "dirname", "sc_system",
                            "sc_queue", "n_cores", "n_nodes", "runtime"], ppr_keys)

    if check:
        return "INVALID: Missing the following JSON fields: "+",".join([str(a) for a in check])

    key = ppr["key"]
    username = ppr["User"]

    # Allowed access for:
    #   - Systems with orchestra key, springIPT mainly
    #   - Calls from wetty where the user is a TACC user


    invalid_access = True

    if valid_adm_passwd(key):
        invalid_access = False
    else:
        reqip = IP_to_hostname(request.environ['REMOTE_ADDR'])
        r_occupied = redis.Redis(host=URL_BASE, port=6379, password=REDIS_AUTH, db=0)

        # Request comes from a known wetty host and the user is present on it
        if (reqip in redkeys(r_occupied)) and (not mints.get_ip_port(username, reqip)[1]):
            invalid_access = False

    if invalid_access:
        return "INVALID: Access not allowed"


    job_ID = ppr["ID"]
    origin = ppr["origin"]
    job_type = ppr["Job"]
    modules = ppr["modules"]
    output_files = ppr["output_files"]
    dirname = ppr["dirname"]
    sc_system = ppr["sc_system"]
    sc_queue = ppr["sc_queue"]
    n_cores = ppr["n_cores"]
    n_nodes = ppr["n_nodes"]
    runtime = ppr["runtime"]


    # If the user is not TACC, job cannot be submitted
    type_of_user, error_in_db = mints.current_user_status(username)
    greyfish_commonuser_job_loc = "/greyfish/sandbox/DIR_commonuser/jobs_left/"+dirname+".zip"


    if error_in_db:

        # Deletes the job data if it exists
        if os.path.exists(greyfish_commonuser_job_loc):
            os.remove(greyfish_commonuser_job_loc)

        return "User is not logged in correctly"

    if type_of_user[0] == "false":

        # Deletes the job data if it exists
        if os.path.exists(greyfish_commonuser_job_loc):
            os.remove(greyfish_commonuser_job_loc)

        return "User is not authorized to submit jobs"



    if hhmmss_pattern.match(runtime) is None:
        return "INVALID: time must be specified as HH:MM:SS"

    if job_type not in ["Compile", "Run", "Both"]:
        return "INVALID: Job type not accepted, must be 'Compile', 'Run', or 'Both'"


    # Processes the commands
    if job_type == "Compile":

        check_compile = l2_contains_l1(["CC"], ppr_keys)
        if check_compile:
            return "INVALID: Missing the following JSON fields: "+",".join([str(a) for a in check_compile])

        number_compile_instructions = int(ppr["CC"])
        compile_instruction_tags = ["C"+str(c) for c in range(0, number_compile_instructions)]
        check_compile2 = l2_contains_l1(compile_instruction_tags, ppr_keys)
        if check_compile2:
            return "INVALID: Missing the following JSON fields: "+",".join([str(a) for a in check_compile2])

        compile_instructions = [ppr[c_tag] for c_tag in compile_instruction_tags]
        run_instructions = None

    elif job_type == "Run":
        check_run = l2_contains_l1(["RC"], ppr_keys)
        if check_run:
            return "INVALID: Missing the following JSON fields: "+",".join([str(a) for a in check_run])

        number_run_instructions = int(ppr["RC"])
        run_instruction_tags = ["R"+str(c) for c in range(0, number_run_instructions)]
        check_run2 = l2_contains_l1(run_instruction_tags, ppr_keys)
        if check_run2:
            return "INVALID: Missing the following JSON fields: "+",".join([str(a) for a in check_run2])

        compile_instructions = None
        run_instructions = [ppr[r_tag] for r_tag in run_instruction_tags]

    elif job_type == "Both":
        check_both = l2_contains_l1(["CC", "RC"], ppr_keys)
        if check_both:
            return "INVALID: Missing the following JSON fields: "+",".join([str(a) for a in check_both])

        number_compile_instructions = int(ppr["CC"])
        compile_instruction_tags = ["C"+str(c) for c in range(0, number_compile_instructions)]
        check_compile2 = l2_contains_l1(compile_instruction_tags, ppr_keys)
        if check_compile2:
            return "INVALID: Missing the following JSON fields: "+",".join([str(a) for a in check_compile2])

        compile_instructions = [ppr[c_tag] for c_tag in compile_instruction_tags]

        number_run_instructions = int(ppr["RC"])
        run_instruction_tags = ["R"+str(c) for c in range(0, number_run_instructions)]
        check_run2 = l2_contains_l1(run_instruction_tags, ppr_keys)
        if check_run2:
            return "INVALID: Missing the following JSON fields: "+",".join([str(a) for a in check_run2])

        run_instructions = [ppr[r_tag] for r_tag in run_instruction_tags]


    # Adds the data to a json file if using the web interface
    # Warning: only the single directory inside the zip (renamed to match it) is kept
    if origin == "web":
        GREYFISH_DIR = "/greyfish/sandbox/DIR_commonuser/jobs_left/"
        zip_location = GREYFISH_DIR+dirname+".zip"

        os.mkdir(GREYFISH_DIR+dirname)
        os.chdir(GREYFISH_DIR+dirname)

        with zipfile.ZipFile(zip_location, 'r') as zf:
            zf.extractall(".")

        # The zip must contain exactly one directory holding all the data
        dirs_inside = [adir for adir in os.listdir('.') if os.path.isdir(GREYFISH_DIR+dirname+"/"+adir)]

        if len(dirs_inside) != 1:
            return "INVALID: "+str(len(dirs_inside)+" were provided, only one directory containing all the necessary data can be inside the zip file")

        only_dir_with_info = dirs_inside[0]
        os.rename(only_dir_with_info, dirname)

        for item in os.listdir('.'):

            if item == dirname:
                continue

            if os.path.isdir(item):
                shutil.rmtree(item)
            else:
                # for files
                os.remove(item)


        web_data_to_json_file.json_to_file(dirname, ppr)

        os.remove(zip_location)
        shutil.make_archive(GREYFISH_DIR+dirname, "zip", ".")

        os.chdir(CURDIR)
        shutil.rmtree(GREYFISH_DIR+dirname)


    mints.add_job(job_ID, username, compile_instructions, run_instructions, job_type, origin, modules, output_files, dirname,
                    sc_system, sc_queue, n_cores, n_nodes)

    return "New job added to database"
Example #3
def process_web_jobs():

    if request.method != 'POST':
        return "INVALID, no data provided"


    try:
        dictdata = request.get_json()
    except Exception:
        return "INVALID, JSON could not be parsed"


    try:
        TOK = dictdata["Token"]
        Reefil = dictdata["Files"]
        Image = dictdata["Image"]
        Custom = dictdata["Custom"]
        Command = dictdata["Command"]
        Username = dictdata["Username"]

        if "priority" not in dictdata.keys():
            PRIORITY = "Middle"
        else:
            PRIORITY = dictdata["priority"]
    except KeyError:
        return "INVALID, json lacks at least one field (keys: Token, Files, Image, Custom, Command, Username)"

    if not pp.token_test(TOK):
        return "INVALID token"

    # Drops empty commands and commands made of one repeated character
    # (stray separators), then re-joins them with ';'
    Command = ';'.join([C for C in Command.split(';') if C and C.count(C[0]) != len(C)]) + ';'

    boapp = "boinc2docker"
    if Image == "carlosred/gpu:cuda":
        boapp = "volcon"

    if (Custom != "Yes" ) and (Custom != "No"):
        return abort(422) # Incorrect API

    if not image_is_TACC(Image):
        Custom = "Yes"
        # Gets the tags
        try:
            tags_used = [x.strip() for x in dictdata["topics"].split(";") if x.strip() != ""]

            if tags_used == []:
                tags_used = "STEM"
            else:
                tags_used = ",".join(tags_used)
                tags_used = tags_used.lower()

        except Exception as e:
            print(e)
            # Error in processing json
            tags_used = "STEM"

    else:
        # Default tag: STEM
        tags_used = "STEM"


    if boapp == "boinc2docker":
        # Random file name kept from the old file-based submission path (see "Old way" below)
        new_filename = pp.random_file_name()

        Complete_command = ""

        # Custom images require more work: we must ensure the results are returned
        if Custom == "Yes":
            # Creates a new working directory
            Complete_command += "mkdir -p /data; cd /data; "
            # Adds the files
            for FF in Reefil:

                # Stops at the first empty entry
                if FF == "":
                    break
                Complete_command += get_reef_file(Image, TOK, FF)+" "

            Complete_command += Command+" mkdir -p /root/shared/results/; mv ./* /root/shared/results"

        elif Custom == "No":
            # Adds the files
            for FF in Reefil:

                # Stops at the first empty entry
                if FF == "":
                    break
                Complete_command += get_reef_file(Image, TOK, FF)+" "

            Complete_command += Command +" mv ./* /root/shared/results"


        # Sanitizes the command before embedding it in a double-quoted /bin/bash -c "..."
        Complete_command = " /bin/bash -c \""+sanitize_str_chars(Complete_command)+"\""
        mints.add_boinc2docker_job(Username, TOK, tags_used, Image, Complete_command, boapp, "web", "Job submitted")

        # Old way
        # shutil.move(UPLOAD_FOLDER+new_filename, BOINC_FOLDER+new_filename)


    if boapp == "volcon":
        
        # Only CUDA requires a GPU
        # Custom images are also assumed not to require a GPU (TODO: revisit)
        if Image == "carlosred/gpu:cuda":
            GPU = 1
        else:
            GPU = 0

        VolCon_ID = uuid.uuid4().hex

        COMMANDS = ""

        if Custom == "Yes":
            TACC = 0
            COMMANDS += "mkdir -p /data; cd /data; "
            for FF in Reefil:
                if FF == '':
                    break
                COMMANDS += get_reef_file(Image, TOK, FF)+" "

            # Splits the commands and ensures that they are run in /data
            newcoms = ";".join(["cd /data && "+x for x in Command.split(";")])

            COMMANDS += newcoms+" mkdir -p /root/shared/results/; mv /data/* /root/shared/results"

        else:
            TACC = 1
            for FF in Reefil:
                if FF == '':
                    break
                COMMANDS += get_reef_file(Image, TOK, FF)+" "
            COMMANDS += ";".join([extra_image_commands(Image) +z for z in Command.split(";")])+" mv ./* /root/shared/results"

        COMMANDS = " /bin/bash -c \""+COMMANDS+"\""

        job_info = {"Image":Image, "Command":COMMANDS, "TACC":TACC, "GPU":GPU, "VolCon_ID":VolCon_ID, "public":1}

        # Adds the job to the database
        mints.add_job(TOK, Image, COMMANDS, GPU, VolCon_ID, PRIORITY, 1, tags_used, Username, "web")
        # Pushes job information to mirrors
        mirror.upload_job_to_mirror(job_info)


    return "Commands submitted for processing"
Example #4
def complete_build(IMTAG, UTOK, MIDIR, COMMAND_TXT, DOCK_DOCK, BOCOM, FILES_PATH='.'):

    researcher_email = pp.obtain_email(UTOK)
    try:
        # Reduces the corresponding user's allocation
        # (Docker represents image size in GB)
        user_image(IMTAG)

        # Retrieves which app this build was submitted through
        boapp = r.get(UTOK+';'+MIDIR).decode("UTF-8")

        # boinc2docker: moves the command file into the BOINC project tree
        if boapp == "boinc2docker":
            shutil.move(COMMAND_TXT+".txt", "/home/boincadm/project/html/user/token_data/process_files/"+COMMAND_TXT+".txt")


        # VolCon instructions
        # Deletes the image, submits the saved version to a mirror

        if boapp == "volcon":
            
            # Deletes the key
            r.delete(UTOK+';'+MIDIR)

            # Saves the image into a file
            img = image.get(IMTAG)
            resp = img.save()
            random_generated_dir = hashlib.sha256(str(datetime.datetime.now()).encode('UTF-8')).hexdigest()[:4]
            image_dir = os.getcwd()+"/"+random_generated_dir
            os.mkdir(image_dir)
            full_image_path = image_dir+"/image.tar.gz"
            with open(full_image_path, "wb") as ff:
                for salmon in resp:
                    ff.write(salmon)

            VolCon_ID = uuid.uuid4().hex
            mirror_IP = mirror.get_random_mirror()

            # Move image to the mirror
            mirror.upload_file_to_mirror(full_image_path, mirror_IP, VolCon_ID)

            # Renames the local copy using a time-based hash suffix
            saved_name = "image_"+hashlib.sha256(str(datetime.datetime.utcnow()).encode('UTF-8')).hexdigest()[:4]+".tar.gz"
            shutil.move(full_image_path, saved_name)

            # Extracts the command portion of BOCOM (drops its leading token)
            Commands = " ".join(BOCOM.split(" ")[1:])

            # Add job to VolCon
            # Set as medium priority
            mints.add_job(UTOK, "Custom", Commands, 0, VolCon_ID, "Middle", public=0)
            mints.update_mirror_ip(VolCon_ID, mirror_IP)

            # MIDAS cannot accept GPU jobs
            job_info = {"Image":"Custom", "Command":Commands, "TACC":0, "GPU":0,
                        "VolCon_ID":VolCon_ID, "public":0, "key":mirror.mirror_key(mirror_IP)}
            
            requests.post('http://'+mirror_IP+":7000/volcon/mirror/v2/api/public/receive_job_files",
                json=job_info)

            # Uploads the saved image to Reef
            with open(saved_name, "rb") as fh:
                requests.post('http://'+os.environ['Reef_IP']+':2001/reef/result_upload/'+os.environ['Reef_Key']+'/'+UTOK,
                        files={"file": fh})

            # Deletes local copy
            os.remove(saved_name)
            # Removes the image
            container.prune()
            image.remove(IMTAG, force=True)

            # Email user with dockerfile
            MESSAGE = Success_Message.replace("DATETIME", datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
            MESSAGE += "\n\nClick on the following link to obtain a compressed version of the application docker image.\n"
            MESSAGE += "You are welcome to upload the image on dockerhub in order to reduce the future job processing time for the same application (no allocation will be discounted): \n"
            MESSAGE += os.environ["SERVER_IP"]+":5060/boincserver/v2/reef/results/"+UTOK+"/"+saved_name.replace("../", "")
            MESSAGE += "\n\nDownload the image using the following command:\n"
            MESSAGE += "curl -O "+os.environ["SERVER_IP"]+":5060/boincserver/v2/reef/results/"+UTOK+"/"+saved_name.replace("../", "")
            MESSAGE += "\nThen load the image (sudo permission may be required):"
            MESSAGE += "\ndocker load < "+saved_name.replace("../", "")
            MESSAGE += "\nThe image ID will appear, which can then be used to create a container (sudo permission may be required):"
            MESSAGE += "\ndocker run -it IMAGE_ID bash"
            MESSAGE += "\n\nRun the following command on the image: \n"+' '.join(BOCOM.split(' ')[1::])
            MESSAGE += "\n\nThis is the Dockerfile we used to process your job: \n\n"+DOCK_DOCK

            ec.send_mail_complete(researcher_email, "Successful MIDAS build", MESSAGE, [])

            return None


        # Deletes the key
        r.delete(UTOK+';'+MIDIR)

        # Saves the docker image and sends the user the dockerfile and a link to the tarball
        # (the docker-py documentation was erroneous)

        img = image.get(IMTAG)
        resp = img.save()

        # Creates a file, recycled every time the program runs
        saved_name = "image."+hashlib.sha256(str(datetime.datetime.now()).encode('UTF-8')).hexdigest()[:4]+".tar.gz"
        with open(saved_name, 'wb') as ff:
            for salmon in resp:
                ff.write(salmon)

        # Uploads the file to reef, then deletes the local copy
        with open(saved_name, "rb") as fh:
            requests.post('http://'+os.environ['Reef_IP']+':2001/reef/result_upload/'+os.environ['Reef_Key']+'/'+UTOK, files={"file": fh})
        os.remove(saved_name)
        MESSAGE = Success_Message.replace("DATETIME", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        MESSAGE += "\n\nClick on the following link to obtain a compressed version of the application docker image.\n"
        MESSAGE += "You are welcome to upload the image on dockerhub in order to reduce the future job processing time for the same application (no allocation will be discounted): \n"
        MESSAGE += os.environ["SERVER_IP"]+":5060/boincserver/v2/reef/results/"+UTOK+"/"+saved_name
        MESSAGE += "\n\nDownload the image using the following command:\n"
        MESSAGE += "curl -O "+os.environ["SERVER_IP"]+":5060/boincserver/v2/reef/results/"+UTOK+"/"+saved_name.replace("../", "")
        MESSAGE += "\nThen load the image (sudo permission may be required):"
        MESSAGE += "\ndocker load < "+saved_name.replace("../", "")
        MESSAGE += "\nThe image ID will appear, which can then be used to create a container (sudo permission may be required):"
        MESSAGE += "\ndocker run -it IMAGE_ID bash"
        MESSAGE += "\n\nRun the following command on the image: \n"+' '.join(BOCOM.split(' ')[1::])
        MESSAGE += "\n\nThis is the Dockerfile we used to process your job: \n\n"+DOCK_DOCK
        ec.send_mail_complete(researcher_email, "Successful MIDAS build", MESSAGE, [])

    except Exception as e:
        print(e)
        r.delete(UTOK+';'+MIDIR)
        # Deletes the unused container
        client.containers.prune()
        MESSAGE = Failure_Message.replace("DATETIME", datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
        MESSAGE += "\n\nDockerfile created below: \n\n"+DOCK_DOCK
        ec.send_mail_complete(researcher_email, "Failed MIDAS build", MESSAGE, [])
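Note: mirror.upload_job_to_mirror (called in Examples #1 and #3) is not shown. Based on the direct requests.post call in this example, a minimal sketch might push the job description to one connected mirror; get_random_mirror and mirror_key are the helpers already used above:

import requests

def upload_job_to_mirror(job_info):
    # Sketch under assumptions: picks a mirror the way Example #4 does and
    # posts the job description to the same route used there.
    mirror_IP = get_random_mirror()
    job_info["key"] = mirror_key(mirror_IP)
    requests.post("http://"+mirror_IP+":7000/volcon/mirror/v2/api/public/receive_job_files",
                  json=job_info)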