Beispiel #1
0
def fetch_learning(request):
    """Fetch a trained resource-usage model (a, b, r) for a step from the
    remote API and cache it in the session.

    GET parameters: 'hash' (step hash) and 'type' (resource type).
    Returns a rendered snippet on success, or error(api_bus) on failure.
    """
    import json
    # Query key: step hash, type, and this node's cpu/memory/os fingerprint.
    query_string = request.GET['hash'] + ',' + request.GET['type'] + ',' + str(get_config('env', 'cpu')) \
                   + ',' + str(get_config('env', 'memory')) + ',' + str(os_to_int())
    api_bus = get_config('program', 'api',
                         1) + '/Index/share/q/' + query_string
    try:
        res_data = urlopen(
            Request(api_bus, headers={"User-Agent":
                                      "BioQueue-client"})).read()
        # Bug fix: res_data is already bytes (urlopen(...).read() above), so
        # the old `res_data.read()` raised AttributeError and this view
        # always fell through to error().
        res = json.loads(res_data.decode("utf-8"))
        session_dict = {
            'hash': request.GET['hash'],
            'type': request.GET['type'],
            'a': res['a'],
            'b': res['b'],
            'r': res['r'],
        }
        request.session['learning'] = session_dict
        template = loader.get_template('ui/fetch_learning.html')
        context = {
            'step': res,
        }
        return success(template.render(context))
    except Exception as e:
        return error(api_bus)
Beispiel #2
0
def delete_upload_file(request, f):
    """Remove an uploaded file from the current user's workspace, plus its
    comment sidecar if one exists. ``f`` is the base64-encoded relative path."""
    workspace = get_config('env', 'workspace')
    owner_id = str(request.user.queuedb_profile_related.delegate.id)
    target = os.path.join(workspace, owner_id, base64.b64decode(f).decode())
    delete_file(target)
    # The comment sidecar is keyed by the still-encoded name.
    comment_path = os.path.join(workspace, "file_comment", f)
    if os.path.exists(comment_path):
        delete_file(comment_path)

    return success('Deleted')
Beispiel #3
0
def show_workspace_files(user_id, special_type='uploads', block_files=None):
    """List the files in a user's workspace folder.

    :param user_id: user id used to locate the workspace folder
    :param special_type: 'uploads', 'refs', or an explicit directory path
    :param block_files: optional collection of base64-encoded relative names
        to exclude from the listing
    :return: list of dicts (name, file_size, file_create, trace, raw),
        sorted by name
    """
    import time
    user_files = []
    if special_type != "uploads" and special_type != "refs":
        # Anything other than the two well-known folders is an explicit path.
        user_path = special_type
    else:
        user_path = os.path.join(get_config('env', 'workspace'), str(user_id),
                                 special_type)

    if not os.path.exists(user_path):
        os.makedirs(user_path)

    for root, dirs, files in os.walk(user_path):
        for file_name in files:
            file_full_path = os.path.join(root, file_name)

            # Sub-folder prefix relative to the walk root, '' at top level.
            subs = ""
            if root != user_path:
                subs = root.replace(user_path + "/", "")
            tmp = dict()
            tmp['name'] = os.path.join(subs, file_name)
            tmp['file_size'] = os.path.getsize(file_full_path)
            tmp['file_create'] = time.ctime(os.path.getctime(file_full_path))
            # 'trace' encodes the path including the folder type; 'raw' just
            # the relative name. Both base64 so they are URL-safe tokens.
            tmp['trace'] = base64.b64encode(
                os.path.join(special_type, tmp['name']).encode()).decode()
            tmp['raw'] = base64.b64encode(tmp['name'].encode()).decode()
            # Bug fix: previously files were only collected when block_files
            # was provided; a None block list now means "nothing is blocked".
            if block_files is None or tmp['raw'] not in block_files:
                user_files.append(tmp)
    return sorted(user_files, key=lambda item: item['name'])
Beispiel #4
0
def show_upload_files(request, special_type='uploads'):
    """Render the listing of the user's upload folder, flagging any file
    whose path contains whitespace or non-ASCII characters."""
    base_dir = os.path.join(
        get_config('env', 'workspace'),
        str(request.user.queuedb_profile_related.delegate.id), special_type)
    listing = []
    if not os.path.exists(base_dir):
        try:
            os.makedirs(base_dir)
        except Exception as e:
            return render(request, 'ui/error.html', {'error_msg': e})

    for root, dirs, files in os.walk(base_dir):
        for entry in files:
            messages = []
            full_path = os.path.join(root, entry)
            has_warning = 0
            if " " in full_path:
                has_warning = 1
                messages.append("White space found in the file path")
            else:
                # Only checked when no whitespace problem was found.
                try:
                    full_path.encode("ascii")
                except UnicodeEncodeError:
                    has_warning = 1
                    messages.append("Non-ASCII code found in the file path")
            # Strip the base directory (either separator style) to get the
            # path relative to the upload folder.
            rel_path = full_path.replace(base_dir + '\\', '') \
                .replace(base_dir + '/', '').replace(base_dir, '')
            listing.append((rel_path, has_warning, ";".join(messages)))
    context = {'user_files': sorted(listing, key=lambda item: item[0])}
    return render(request, 'ui/show_uploads.html', context)
Beispiel #5
0
def handle_uploaded_file(f):
    """Persist an uploaded file-like object into the batch-job folder under
    a random name and return the path it was written to."""
    import os
    destination_path = os.path.join(get_config('env', 'batch_job'),
                                    rand_sig() + '.txt')
    with open(destination_path, 'wb+') as out:
        for piece in f.chunks():
            out.write(piece)
    return destination_path
Beispiel #6
0
def delete_job_file(request, f):
    """Delete a single job file identified by its base64-encoded relative
    path inside the current user's workspace."""
    owner_id = str(request.user.queuedb_profile_related.delegate.id)
    target = os.path.join(get_config('env', 'workspace'), owner_id,
                          base64.b64decode(f).decode())
    delete_file(target)

    return success('Deleted')
Beispiel #7
0
def check_disk_quota_lock(user):
    """Return 1 when the user may still consume disk space, 0 otherwise.

    An unset/empty 'disk_quota' setting means the quota is unlimited."""
    quota = get_config('env', 'disk_quota')
    if not quota:
        return 1
    return 1 if get_user_folder_size(user) < int(quota) else 0
Beispiel #8
0
def get_disk_quota_info(user):
    """Return (quota, used, percent-used) for a user's workspace.

    Any failure (quota unset, non-numeric, or zero) is logged and yields
    all zeros."""
    try:
        quota = int(get_config('env', 'disk_quota'))
        used = get_user_folder_size(user)
        percentage = int(round(used / quota * 100))
    except Exception as e:
        print(e)
        quota = used = percentage = 0
    return quota, used, percentage
Beispiel #9
0
def create_protocol(request):
    """Create a new protocol (ProtocolList) and its Steps from POST data.

    POST fields: 'name', 'description' (validated by CreateProtocolForm)
    plus the parallel lists 'software', 'parameter', 'version_check' and
    'env' (one entry per step). A GET request renders the creation page
    instead.
    """
    import hashlib
    if request.method == 'POST':
        protocol_form = CreateProtocolForm(request.POST)
        if protocol_form.is_valid():
            try:
                cd = protocol_form.cleaned_data
                # Protocol names must be unique per user.
                if ProtocolList.objects.filter(name=cd['name'], user=request.user.queuedb_profile_related.delegate).exists():
                    return error('Duplicate record!')
                protocol = ProtocolList(name=cd['name'], description=cd['description'],
                                        user=request.user.queuedb_profile_related.delegate)
                protocol.save()
                # Parallel lists: index i of each list describes step i.
                softwares = request.POST.getlist('software', '')
                parameters = request.POST.getlist('parameter', '')
                version_checks = request.POST.getlist('version_check', '')
                virtual_environments = request.POST.getlist('env', '')
                steps = []
                try:
                    # Re-fetch the saved record to use as the FK parent.
                    protocol_id_trace = ProtocolList.objects.get(id=protocol.id)
                except Exception as e:
                    return error(e)
                for index, software in enumerate(softwares):
                    # Steps with an empty parameter are silently skipped.
                    if parameters[index]:
                        # Step identity hash: md5 of "<software> <parameter>".
                        m = hashlib.md5((software + ' ' + parameters[index].strip()).encode())
                        env = virtual_environments[index]
                        if env != "":
                            try:
                                env = VirtualEnvironment.objects.get(id=int(env))
                            except:
                                env = None
                        else:
                            env = None
                        steps.append(Step(software=software,
                                              parameter=parameters[index],
                                              version_check=version_checks[index],
                                              env=env,
                                              parent=protocol_id_trace,
                                              hash=m.hexdigest(),
                                              step_order=index + 1,
                                              user=request.user.queuedb_profile_related.delegate))
                Step.objects.bulk_create(steps)
                archive_protocol(request, protocol_id_trace.id)
                return success('Your protocol have been created!')
            except Exception as e:
                return error(e)
        else:
            return error(str(protocol_form.errors))
    else:
        # GET: render the creation page with the environments this user may use.
        if request.user.is_superuser:
            available_env = VirtualEnvironment.objects.all()
        else:
            available_env = VirtualEnvironment.objects.filter(user=request.user.queuedb_profile_related.delegate).all()
        context = {'api_bus': get_config('program', 'api', 1),
                   'user_envs': available_env, }
        return render(request, 'ui/add_protocol.html', context)
Beispiel #10
0
def delete_job_file_tree(request, f):
    """Recursively delete a job result folder inside the user's workspace.

    :param f: folder name relative to the user's workspace directory.
    Errors are printed and swallowed (best-effort cleanup); nothing is
    returned.
    """
    try:
        if f is not None and f != "":
            user_dir = os.path.join(
                get_config('env', 'workspace'),
                str(request.user.queuedb_profile_related.delegate.id))
            job_path = os.path.join(user_dir, f)
            import shutil
            # Security: `f` comes from the request, so resolve both paths and
            # refuse anything (e.g. "../other_user") that escapes the user's
            # own workspace. This also keeps the original guard against
            # deleting the workspace root itself.
            real_user_dir = os.path.realpath(user_dir)
            real_job_path = os.path.realpath(job_path)
            if (os.path.exists(real_job_path)
                    and real_job_path != real_user_dir
                    and real_job_path.startswith(real_user_dir + os.sep)):
                shutil.rmtree(real_job_path, ignore_errors=True)

    except Exception as e:
        print(e)
Beispiel #11
0
def query_usage(request):
    """
    This function should only be called when the user is using IE8 or IE9
    :param request:
    :return:
    """
    api_bus = (get_config('program', 'api', 1)
               + '/Kb/findSoftwareUsage?software='
               + request.POST['software'])
    try:
        payload = urlopen(
            Request(api_bus,
                    headers={"User-Agent": "BioQueue-client"})).read()
        return HttpResponse(payload.decode("utf-8"))
    except Exception as e:
        # On any failure, hand the API URL back so the client can retry.
        return error(api_bus)
Beispiel #12
0
def show_job_log(request):
    """Return the stdout (.log) or stderr (.err) log of a job.

    POST only; validated by QueryJobLogForm ('job' id, 'std_out' flag)."""
    if request.method != 'POST':
        return error('Method error')
    query_job_form = QueryJobLogForm(request.POST)
    if not query_job_form.is_valid():
        return error(str(query_job_form.errors))
    cd = query_job_form.cleaned_data
    extension = ".log" if cd["std_out"] else ".err"
    log_path = os.path.join(get_config('env', 'log'),
                            str(cd['job']) + extension)
    try:
        from worker.bases import get_job_log
        return success(get_job_log(log_path))
    except Exception as e:
        return error(e)
Beispiel #13
0
def send_file_as_reference(request, f):
    """Move a workspace file into the user's 'refs' folder so it can serve
    as a reference. ``f`` is a base64-encoded path, optionally prefixed
    with 'f/'."""
    workspace = os.path.join(
        get_config('env', 'workspace'),
        str(request.user.queuedb_profile_related.delegate.id))
    source = os.path.join(workspace,
                          base64.b64decode(f.replace('f/', '')).decode())
    refs_dir = os.path.join(workspace, 'refs')
    if not (os.path.exists(source) and os.path.isfile(source)):
        return error('Cannot find the file.')

    if not (os.path.exists(refs_dir) and os.path.isdir(refs_dir)):
        try:
            os.makedirs(refs_dir)
        except Exception as e:
            return error(e)

    import shutil
    shutil.move(source, refs_dir)
    return success("")
Beispiel #14
0
def create_reference_shortcut(request):
    """Create a Reference record pointing at an uploaded or job-produced
    file.

    Form fields (CreateReferenceForm): 'name', 'description', 'source'
    ('upload' or 'job') and 'path' (base64-encoded path relative to the
    user's workspace). Returns the new reference id on success.
    """
    reference_form = CreateReferenceForm(request.POST)
    if reference_form.is_valid():
        cd = reference_form.cleaned_data
        if cd['source'] == 'upload' or cd['source'] == 'job':
            # Bug fix: b64decode returns bytes; decode to str so the join
            # yields a usable str path (matches every other handler here).
            file_path = os.path.join(
                get_config('env', 'workspace'),
                str(request.user.queuedb_profile_related.delegate.id),
                base64.b64decode(cd['path']).decode())
            ref = Reference(
                name=cd['name'],
                path=file_path,
                description=cd['description'],
                user=request.user.queuedb_profile_related.delegate,
            )
            ref.save()
            return success(ref.id)
        # Bug fix: an unrecognized source previously fell through and
        # returned None instead of a proper error response.
        return error('Unknown reference source.')
    else:
        return error(str(reference_form.errors))
Beispiel #15
0
def download_file(request, job_id, f):
    """Send a result file to the client.

    :param job_id: job id; 0 means the file lives directly in the user's
        workspace rather than inside a job run directory
    :param f: base64-encoded file path, optionally prefixed with 'f/'
    """
    try:
        relative = base64.b64decode(f.replace('f/', '').encode()).decode()
        if job_id != 0:
            job = Job.objects.get(id=job_id)
            file_path = os.path.join(
                job.run_dir,
                str(request.user.queuedb_profile_related.delegate.id),
                relative)
        else:
            # Bug fix: this branch used queuedb_profile_related.id while
            # every other path in this module uses the delegate's id.
            file_path = os.path.join(
                get_config("env", "workspace"),
                str(request.user.queuedb_profile_related.delegate.id),
                relative)
        if os.path.exists(file_path):
            return download(file_path)
        return render(request, "ui/error.html",
                      {"error_msg": "Cannot locate the file"})
    except Exception as e:
        return render(request, "ui/error.html", {"error_msg": str(e)})
Beispiel #16
0
def clean_dead_folder(request):
    """Delete job folders in the user's workspace whose job record no
    longer references them ("dead" folders). Returns a summary message."""
    from shutil import rmtree
    jobs_containers = os.path.join(get_config('env', 'workspace'),
                                   str(request.user.queuedb_profile_related.delegate.id))
    # Result-folder names still referenced by a job record.
    # (Loop variable renamed from `re`, which shadowed the re module.)
    job_results = set(
        [row["result"] for row in Job.objects.filter(
            user_id=str(request.user.queuedb_profile_related.delegate.id)).values('result')])
    protected_folders = {"refs", "bin", "uploads", "archives", "OVERRIDE_UPLOAD"}
    death_counter = 0
    failed = 0
    if os.path.exists(jobs_containers):
        for job_result in os.listdir(jobs_containers):
            if job_result not in job_results and job_result not in protected_folders:
                death_counter += 1
                try:
                    abs_path = os.path.join(jobs_containers, job_result)
                    if os.path.isdir(abs_path):
                        rmtree(abs_path)
                except OSError:
                    # Narrowed from a bare except: only filesystem errors
                    # are expected here.
                    failed += 1
    return success("%d/%d dead folder detected. %d of them failed to be cleaned." % (death_counter,
                                                                                     len(job_results),
                                                                                     failed))
Beispiel #17
0
def import_protocol_by_fetch(request):
    """Import a shared protocol from the remote API into the user's account.

    POST field 'uid' (FetchRemoteProtocolForm) is the share signature.
    Creates the ProtocolList, its Steps and any bundled resource-usage
    Predictions, then renders a report of which references the user still
    needs to set up.
    """
    if request.method != 'POST':
        return error('Error method')
    form = FetchRemoteProtocolForm(request.POST)
    if not form.is_valid():
        return error(form.errors)
    api_bus = get_config('program', 'api', 1) + '/Protocol/exportProtocolStdout?sig=' + request.POST['uid']
    try:
        req = Request(api_bus, headers={"User-Agent": "BioQueue-client"})
        protocol_raw = urlopen(req).read()
        import json
        import hashlib
        protocol_json = json.loads(protocol_raw.decode("utf-8"))
        # Protocol names must be unique per user.
        if ProtocolList.objects.filter(name=protocol_json['name'],
                                       user=request.user.queuedb_profile_related.delegate).exists():
            return error('Duplicate record!')
        protocol = ProtocolList(name=protocol_json['name'],
                                user=request.user.queuedb_profile_related.delegate)
        protocol.save()
        steps = []
        predictions = []
        try:
            protocol_id_trace = ProtocolList.objects.get(id=protocol.id)
        except Exception as e:
            return error(e)
        for step in protocol_json['step']:
            # Step identity hash: md5 of "<software> <parameter>".
            m = hashlib.md5((step['software'] + ' ' + step['parameter'].strip()).encode())
            steps.append(Step(software=step['software'],
                              parameter=step['parameter'],
                              parent=protocol_id_trace,
                              hash=m.hexdigest(),
                              step_order=step['step_order'],
                              user=request.user.queuedb_profile_related.delegate))
            # NOTE(review): each `continue` below skips ALL remaining
            # prediction types for this step, not just the current one —
            # preserved as-is to avoid changing what gets imported.
            # Prediction types: 1 = disk, 2 = memory, 3 = cpu, 4 = virtual memory.
            if 'cpu_a' in step and 'cpu_b' in step and 'cpu_r' in step:
                # Only import cpu models trained on a matching cpu count.
                if 'cpu' in step and step['cpu'] != get_config('env', 'cpu'):
                    continue
                if Prediction.objects.filter(step_hash=m.hexdigest(), type=3).exists():
                    continue
                predictions.append(Prediction(a=step['cpu_a'],
                                              b=step['cpu_b'],
                                              r=step['cpu_r'],
                                              type=3,
                                              step_hash=m.hexdigest()))
            if 'mem_a' in step and 'mem_b' in step and 'mem_r' in step:
                if 'mem' in step and step['mem'] != get_config('env', 'mem'):
                    continue
                if Prediction.objects.filter(step_hash=m.hexdigest(), type=2).exists():
                    continue
                predictions.append(Prediction(a=step['mem_a'],
                                              b=step['mem_b'],
                                              r=step['mem_r'],
                                              type=2,
                                              step_hash=m.hexdigest()))
            if 'vrt_a' in step and 'vrt_b' in step and 'vrt_r' in step:
                if 'mem' in step and step['mem'] != get_config('env', 'mem'):
                    continue
                # Bug fix: the duplicate check used type=2 (memory) although
                # the prediction stored here is type=4 (virtual memory).
                if Prediction.objects.filter(step_hash=m.hexdigest(), type=4).exists():
                    continue
                predictions.append(Prediction(a=step['vrt_a'],
                                              b=step['vrt_b'],
                                              r=step['vrt_r'],
                                              type=4,
                                              step_hash=m.hexdigest()))
            if 'disk_a' in step and 'disk_b' in step and 'disk_r' in step:
                if Prediction.objects.filter(step_hash=m.hexdigest(), type=1).exists():
                    continue
                predictions.append(Prediction(a=step['disk_a'],
                                              b=step['disk_b'],
                                              r=step['disk_r'],
                                              type=1,
                                              step_hash=m.hexdigest()))

        Step.objects.bulk_create(steps)
        if predictions:
            Prediction.objects.bulk_create(predictions)
        # Report which bundled references the user already has (status 1)
        # and which are missing (status 0).
        ref_list = list()
        for value in protocol_json['reference']:
            try:
                Reference.objects.get(name=value['name'],
                                      user=request.user.queuedb_profile_related.delegate)
                status = 1
            except Exception:
                status = 0
            ref_list.append({'name': value['name'],
                             'description': value['description'],
                             'status': status, })
        from django.template import RequestContext, loader
        template = loader.get_template('ui/import_protocol.html')
        return success(template.render({'ref_list': ref_list}))
    except Exception as e:
        return error(e)
Beispiel #18
0
def build_plain_protocol(request, protocol_id, stand_alone=True):
    """
    Convert a protocol into a plain, JSON-serializable format.

    :param request: Django request, used for the ownership check
    :param protocol_id: int, protocol id
    :param stand_alone: bool, when True also embed the referenced
        references and per-step resource-prediction equations
    :return: tuple (protocol name, JSON string) on success;
        ('', 1) when the protocol does not exist;
        ('', 2) when the user has no permission to access it
    """
    protocol_data = dict()
    try:
        protocol_parent = ProtocolList.objects.get(id=protocol_id)
    except ProtocolList.DoesNotExist:
        return '', 1
    if not protocol_parent.check_owner(request.user.queuedb_profile_related.delegate):
        return '', 2
    steps = Step.objects.filter(parent=protocol_parent.id)

    protocol_data['name'] = protocol_parent.name
    protocol_data['step'] = [{
        'software': step.software,
        'parameter': step.parameter,
        'hash': step.hash,
        'step_order': step.step_order,
    } for step in steps]
    if stand_alone:
        # Map of all reference names the user owns -> description.
        references = {reference.name: reference.description
                      for reference in Reference.objects.filter(
                          user=request.user.queuedb_profile_related.delegate).all()}
        protocol_ref = {}
        # Wildcards look like {{Name}} or {{Name:...}} inside parameters.
        wildcard_pattern = re.compile(r"\{\{(.*?)\}\}", re.IGNORECASE | re.DOTALL)
        for i, step in enumerate(steps):
            try:
                for wildcard in re.findall(wildcard_pattern, step.parameter):
                    wildcard = wildcard.split(':')[0]
                    if wildcard in references and wildcard not in protocol_ref:
                        protocol_ref[wildcard] = references[wildcard]
                # Prediction types: 1 = disk, 2 = memory, 3 = cpu, 4 = virtual memory.
                equations = Prediction.objects.filter(step_hash=step.hash)
                cpu_a = cpu_b = cpu_r = mem_a = mem_b = mem_r = disk_a = disk_b = disk_r = vrt_a = vrt_b = vrt_r = 0
                for equation in equations:
                    if equation.type == 1:
                        disk_a, disk_b, disk_r = equation.a, equation.b, equation.r
                    elif equation.type == 2:
                        mem_a, mem_b, mem_r = equation.a, equation.b, equation.r
                    elif equation.type == 3:
                        cpu_a, cpu_b, cpu_r = equation.a, equation.b, equation.r
                    elif equation.type == 4:
                        vrt_a, vrt_b, vrt_r = equation.a, equation.b, equation.r
                step_info = protocol_data['step'][i]
                step_info['cpu'] = get_config('env', 'cpu')
                step_info['memory'] = get_config('env', 'memory')
                step_info['os'] = os_to_int()
                step_info['cpu_a'] = cpu_a
                step_info['cpu_b'] = cpu_b
                step_info['cpu_r'] = cpu_r
                step_info['mem_a'] = mem_a
                step_info['mem_b'] = mem_b
                step_info['mem_r'] = mem_r
                step_info['vrt_a'] = vrt_a
                step_info['vrt_b'] = vrt_b
                step_info['vrt_r'] = vrt_r
                step_info['disk_a'] = disk_a
                step_info['disk_b'] = disk_b
                step_info['disk_r'] = disk_r
            except Exception:
                # Best-effort: a step whose predictions cannot be read is
                # exported without the equation fields.
                pass
        # Bug fix: this assignment used to live inside the loop, so a
        # protocol with zero steps exported no 'reference' key at all.
        protocol_data['reference'] = protocol_ref
    return protocol_data['name'], build_json_protocol(protocol_data)
Beispiel #19
0
def file_support(request):
    """Create (or reuse) a maintenance protocol for a file and queue a job
    that runs it on that file.

    GET parameters (FileSupportForm): 'file' (base64-encoded path),
    'support' (maintenance protocol key; the special value 'gg' toggles
    gzip/gunzip by file extension) and 'exp'.
    """
    if request.method == "GET":
        fs_form = FileSupportForm(request.GET)
        if fs_form.is_valid():
            cd = fs_form.cleaned_data
            real_file = base64.b64decode(cd["file"]).decode()
            _, ext = os.path.splitext(real_file)
            from ..tools import get_maintenance_protocols
            # One cached protocol per (support, exp, extension) combination.
            protocol_name = "%s (%s, %s)" % (cd["support"], cd["exp"], ext)
            try:
                protocol_record = ProtocolList.objects.get(
                    name=protocol_name,
                    user_id=request.user.queuedb_profile_related.delegate.id)
            except ProtocolList.DoesNotExist:
                # No cached protocol yet - build it now.
                if request.user.queuedb_profile_related.delegate.id == 0:
                    return error("Log in first.")
                protocol_parent = ProtocolList(
                    name=protocol_name,
                    user_id=request.user.queuedb_profile_related.delegate.id)
                protocol_parent.save()
                steps = list()
                maintenance_protocols = get_maintenance_protocols()
                step_order = 1
                if cd["support"] == "gg":
                    # 'gg' = compress with gzip unless the file already has
                    # a .gz extension, in which case decompress.
                    if ext != ".gz":
                        model = __import__("ui.maintenance_protocols.compress",
                                           fromlist=["gzip"])
                    else:
                        model = __import__(
                            "ui.maintenance_protocols.decompress",
                            fromlist=["gunzip"])
                else:
                    if cd["support"] in maintenance_protocols:
                        model = __import__("ui.maintenance_protocols." +
                                           cd["support"],
                                           fromlist=[cd["support"]])
                    else:
                        return error("No support found.")
                step_order, sub_steps = model.get_sub_protocol(
                    Step, protocol_parent, step_order)
                for sub_step in sub_steps:
                    steps.append(sub_step)

                try:
                    Step.objects.bulk_create(steps)
                    protocol_record = protocol_parent
                except:
                    # Roll back the half-created protocol on failure.
                    protocol_parent.delete()
                    return error('Fail to save the protocol.')
            job = Job(
                protocol_id=protocol_record.id,
                parameter=';',
                run_dir=get_config('env', 'workspace'),
                user=request.user.queuedb_profile_related.delegate,
                input_file="{{Uploaded:%s}}" % real_file,
                job_name="%s-%s" % (cd['support'], real_file),
            )
            try:
                job.save()
                return success('Push the task into job queue.')
            except:
                return error('Fail to save the job.')
        else:
            return error(str(fs_form.errors))
    # Bug fix: non-GET requests previously fell off the end returning None.
    return error('Method error')
Beispiel #20
0
def import_protocol(request):
    """Import a protocol from an uploaded JSON file.

    POST only (BatchJobForm, file field 'job_list'). Creates the
    ProtocolList, its Steps and any bundled resource-usage Predictions,
    archives the protocol, then redirects to the protocol query page.
    """
    if request.method != 'POST':
        return render(request, 'ui/error.html', {'error_msg': 'Error method'})
    form = BatchJobForm(request.POST, request.FILES)
    if not form.is_valid():
        return render(request, 'ui/error.html', {'error_msg': str(form.errors)})
    file_name = handle_uploaded_file(request.FILES['job_list'])
    try:
        with open(file_name) as f:
            protocol_raw = f.read()
        import json
        # Bug fix: hashlib is used below but was never imported in this
        # function (its siblings import it locally).
        import hashlib
        protocol_json = json.loads(protocol_raw)
        # Protocol names must be unique per user.
        if ProtocolList.objects.filter(name=protocol_json['name'],
                                       user=request.user.queuedb_profile_related.delegate).exists():
            return error('Duplicate record!')
        protocol = ProtocolList(name=protocol_json['name'],
                                user=request.user.queuedb_profile_related.delegate)
        protocol.save()
        steps = []
        predictions = []
        try:
            protocol_id_trace = ProtocolList.objects.get(id=protocol.id)
        except Exception as e:
            return error(e)
        for step in protocol_json['step']:
            # Step identity hash: md5 of "<software> <parameter>".
            m = hashlib.md5((step['software'] + ' ' + step['parameter'].strip()).encode())
            steps.append(Step(software=step['software'],
                              parameter=step['parameter'],
                              parent=protocol_id_trace,
                              hash=m.hexdigest(),
                              step_order=step['step_order'],
                              user=request.user.queuedb_profile_related.delegate))
            # NOTE(review): each `continue` below skips ALL remaining
            # prediction types for this step, not just the current one —
            # preserved as-is to avoid changing what gets imported.
            # Prediction types: 1 = disk, 2 = memory, 3 = cpu, 4 = virtual memory.
            if 'cpu_a' in step and 'cpu_b' in step and 'cpu_r' in step:
                if Prediction.objects.filter(step_hash=m.hexdigest(), type=3).exists():
                    continue
                predictions.append(Prediction(a=step['cpu_a'],
                                              b=step['cpu_b'],
                                              r=step['cpu_r'],
                                              type=3,
                                              step_hash=m.hexdigest()))
            if 'mem_a' in step and 'mem_b' in step and 'mem_r' in step:
                if Prediction.objects.filter(step_hash=m.hexdigest(), type=2).exists():
                    continue
                predictions.append(Prediction(a=step['mem_a'],
                                              b=step['mem_b'],
                                              r=step['mem_r'],
                                              type=2,
                                              step_hash=m.hexdigest()))
            if 'vrt_a' in step and 'vrt_b' in step and 'vrt_r' in step:
                if 'mem' in step and step['mem'] != get_config('env', 'mem'):
                    continue
                # Bug fix: the duplicate check used type=2 (memory) although
                # the prediction stored here is type=4 (virtual memory).
                if Prediction.objects.filter(step_hash=m.hexdigest(), type=4).exists():
                    continue
                predictions.append(Prediction(a=step['vrt_a'],
                                              b=step['vrt_b'],
                                              r=step['vrt_r'],
                                              type=4,
                                              step_hash=m.hexdigest()))
            if 'disk_a' in step and 'disk_b' in step and 'disk_r' in step:
                if Prediction.objects.filter(step_hash=m.hexdigest(), type=1).exists():
                    continue
                predictions.append(Prediction(a=step['disk_a'],
                                              b=step['disk_b'],
                                              r=step['disk_r'],
                                              type=1,
                                              step_hash=m.hexdigest()))

        Step.objects.bulk_create(steps)
        archive_protocol(request, protocol.id)
        if predictions:
            Prediction.objects.bulk_create(predictions)
        return HttpResponseRedirect('/ui/query-protocol')
    except Exception as e:
        return render(request, 'ui/error.html', {'error_msg': e})
Beispiel #21
0
def batch_job(request):
    """Create multiple jobs from an uploaded tab-separated job list.

    Each line of the uploaded file must contain four tab-separated
    columns: protocol id, job name, input files and parameter string.
    Protocol ownership and the user's disk quota are checked before any
    job is created; the whole batch is rejected on the first invalid
    line.  On success all jobs are bulk-created and the user is
    redirected to the job query page.
    """
    if request.method == 'POST':
        form = BatchJobForm(request.POST, request.FILES)
        if form.is_valid():
            file_name = handle_uploaded_file(request.FILES['job_list'])
            try:
                # cache (owner id, version) per protocol so each protocol
                # referenced by the list is fetched from the DB only once
                protocol_cache = dict()
                try:
                    ws = Workspace.objects.get(id=request.session['workspace'])
                except Workspace.DoesNotExist:
                    ws = None
                with open(file_name) as f:
                    jobs = f.readlines()
                    job_list = []
                    for job in jobs:
                        configurations = job.split('\n')[0].split('\t')
                        if len(configurations) == 4:
                            if check_disk_quota_lock(
                                    request.user.queuedb_profile_related.
                                    delegate.id):
                                protocol_id = int(configurations[0])
                                if protocol_id not in protocol_cache:
                                    try:
                                        protocol = ProtocolList.objects.get(
                                            id=protocol_id)
                                        protocol_cache[protocol_id] = (int(
                                            protocol.user_id), protocol.ver)
                                    except Exception as e:
                                        return render(request, 'ui/error.html',
                                                      {'error_msg': e})
                                # owner id 0 marks a shared/public protocol
                                if protocol_cache[
                                    protocol_id][0] == request.user.queuedb_profile_related.delegate.id or request.user.is_staff or \
                                        protocol_cache[protocol_id][0] == 0:
                                    job_list.append(
                                        Job(protocol_id=protocol_id,
                                            protocol_ver=protocol_cache[
                                                protocol_id][1],
                                            job_name=configurations[1],
                                            input_file=configurations[2],
                                            parameter=configurations[3],
                                            run_dir=get_config(
                                                'env', 'workspace'),
                                            user=request.user.
                                            queuedb_profile_related.delegate,
                                            workspace=ws))
                                else:
                                    return render(
                                        request, 'ui/error.html', {
                                            'error_msg':
                                            'You are not the owner of the protocol(%s)'
                                            % protocol_id
                                        })
                            else:
                                return render(
                                    request, 'ui/error.html', {
                                        'error_msg':
                                        'You have exceed the disk quota limit! Please delete some files!'
                                    })
                        else:
                            # message fixed: the check above requires four
                            # columns, the old text said three
                            return render(
                                request, 'ui/error.html', {
                                    'error_msg':
                                    'Your job list file must contain four columns.'
                                })

                    Job.objects.bulk_create(job_list)
                    return HttpResponseRedirect('/ui/query-job')

            except Exception as e:
                return render(request, 'ui/error.html', {'error_msg': e})
        else:
            return render(request, 'ui/error.html',
                          {'error_msg': str(form.errors)})
    else:
        # previously the view fell through and returned None for GET,
        # which makes Django raise a 500 error
        return render(request, 'ui/error.html', {'error_msg': 'Error method'})
Beispiel #22
0
def add_job(request):
    """Create a single job from the "add job" form.

    POST: validates a ``SingleJobForm``, checks protocol ownership and
    the user's disk quota, saves the job together with an ``Audition``
    audit record and returns a JSON-style success/error response.

    GET: renders the job-creation page with the protocols available to
    the user and the current disk quota usage figures.
    """
    if request.method == 'POST':
        job_form = SingleJobForm(request.POST)
        if job_form.is_valid():
            cd = job_form.cleaned_data
            try:
                # ensure the parameter string contains at least one ';'
                # separator (appended when none is present)
                if ';' not in cd['parameter']:
                    cd['parameter'] += ';'
                protocol = ProtocolList.objects.get(id=cd['protocol'])
                if protocol.check_owner(
                        request.user.queuedb_profile_related.delegate):
                    try:
                        if 'workspace' in request.session:
                            ws = Workspace.objects.get(
                                id=request.session['workspace'])
                        else:
                            ws = None
                    except Workspace.DoesNotExist:
                        ws = None

                    job = Job(
                        protocol_id=cd['protocol'],
                        protocol_ver=protocol.ver,
                        job_name=cd['job_name'],
                        parameter=cd['parameter'],
                        run_dir=get_config('env', 'workspace'),
                        user=request.user.queuedb_profile_related.delegate,
                        input_file=cd['input_files'],
                        workspace=ws,
                    )

                    if check_disk_quota_lock(
                            request.user.queuedb_profile_related.delegate.id):
                        job.save()
                        # audit trail: record the job's initial state
                        # (prev_*/new_* are identical at creation time)
                        Audition(operation="Created a new job",
                                 related_job=job,
                                 job_name=job.job_name,
                                 prev_par=job.parameter,
                                 new_par=job.parameter,
                                 prev_input=job.input_file,
                                 current_input=job.input_file,
                                 protocol=job.protocol.name,
                                 protocol_ver=job.protocol_ver,
                                 resume_point=job.resume,
                                 user=job.user).save()
                        return success('Successfully added job into queue.')
                    else:
                        return error(
                            'You have exceed the disk quota limit! Please delete some files!'
                        )
                else:
                    return error('You are not owner of the protocol.')

            except Exception as e:
                return error(e)
        return error(str(job_form.errors))
    else:
        if request.user.is_staff:
            available_protocol = ProtocolList.objects.all()
        else:
            # staff can see everything; others see their own protocols
            # plus shared ones (user=None)
            available_protocol = ProtocolList.objects.filter(
                Q(user=request.user.queuedb_profile_related.delegate)
                | Q(user=None)).all()

        dt, du, dp = get_disk_quota_info_with_cache(
            request.user.queuedb_profile_related.delegate.id)

        return render(
            request, 'ui/add_job.html', {
                'form': SingleJobForm,
                'user_protocols': available_protocol,
                't_disk': dt,
                'u_disk': du,
                'disk_per': dp
            })
Beispiel #23
0
def install_reference(request):
    """Build (or reuse) a maintenance protocol that downloads, unpacks
    and installs a reference into the user's ``ref`` folder, then queue
    a job running that protocol.

    POST must contain a ``tool`` field holding a JSON description with
    ``how_get`` (download method), ``compression``, ``software`` (one
    command as a string, or a list of commands), ``parameter`` (matching
    ``software``), ``url``, ``name``, ``description`` and optionally
    ``target_files`` (semicolon-separated file names the protocol
    produces).  GET renders the reference-repository search page.
    """
    if request.method == "POST":
        import hashlib
        from json import loads
        from ..tools import get_maintenance_protocols
        ref_info = loads(request.POST["tool"])
        # ``software`` may be a single command or a list of commands; the
        # protocol name encodes the whole pipeline so it can be reused.
        # (isinstance replaces the old ``type(...) == unicode`` test,
        # which raised NameError on Python 3.)
        if isinstance(ref_info["software"], str):
            protocol_name = ref_info["how_get"] + "_" + ref_info[
                "compression"] + "_" + ref_info["software"]
        else:
            protocol_name = ref_info["how_get"] + "_" + ref_info[
                "compression"] + "_" + "_".join(ref_info["software"])
        try:
            protocol_parent = ProtocolList.objects.get(name=protocol_name,
                                                       user_id=0)
        except ProtocolList.DoesNotExist:
            # protocol does not exist yet: assemble it step by step
            protocol_parent = ProtocolList(name=protocol_name, user_id=0)
            protocol_parent.save()
            steps = list()
            maintenance_protocols = get_maintenance_protocols()
            step_order = 1
            # download step ("n" means no fetch step is needed)
            if ref_info["how_get"] not in maintenance_protocols and ref_info[
                    "how_get"] != "n":
                protocol_parent.delete()
                return error("No protocol to fetch the data")
            else:
                model = __import__("ui.maintenance_protocols." +
                                   ref_info["how_get"],
                                   fromlist=[ref_info["how_get"]])
                step_order, sub_steps = model.get_sub_protocol(
                    Step, protocol_parent, step_order)
                for sub_step in sub_steps:
                    steps.append(sub_step)
            # decompression step ("n" means the data is not compressed)
            if ref_info[
                    "compression"] not in maintenance_protocols and ref_info[
                        "compression"] != "n":
                protocol_parent.delete()
                return error("No protocol to decompress (%s) the data" %
                             ref_info["compression"])
            else:
                if ref_info["compression"] != "n":
                    model = __import__("ui.maintenance_protocols." +
                                       ref_info["compression"],
                                       fromlist=[ref_info["compression"]])
                    step_order, sub_steps = model.get_sub_protocol(
                        Step, protocol_parent, step_order)
                    for sub_step in sub_steps:
                        steps.append(sub_step)
            # post-decompression command(s) supplied by the repository entry;
            # each step is keyed by the md5 of "software parameter"
            if isinstance(ref_info['software'], str):
                step_order += 1
                m = hashlib.md5((ref_info['software'] + ' ' +
                                 ref_info['parameter'].strip()).encode())
                steps.append(
                    Step(software=ref_info['software'],
                         parameter=ref_info['parameter'].strip(),
                         parent=protocol_parent,
                         hash=m.hexdigest(),
                         step_order=step_order,
                         user_id=0))
            elif len(ref_info['software']) == len(
                    ref_info['parameter']) and len(ref_info['software']) >= 1:
                for ind, software in enumerate(ref_info['software']):
                    step_order += 1
                    m = hashlib.md5(
                        (software + ' ' +
                         ref_info['parameter'][ind].strip()).encode())
                    steps.append(
                        Step(software=software,
                             parameter=ref_info['parameter'][ind].strip(),
                             parent=protocol_parent,
                             hash=m.hexdigest(),
                             step_order=step_order,
                             user_id=0))
            # final step: move compiled targets into the user's ref folder
            steps.append(
                Step(software="mv",
                     parameter="{{CompileTargets}} {{UserRef}}",
                     parent=protocol_parent,
                     user_id=0,
                     hash='d885188751fc204ad7bbdf63fc6564df',
                     step_order=step_order + 1))
            Step.objects.bulk_create(steps)
        user_ref_dir = os.path.join(
            os.path.join(
                get_config('env', 'workspace'),
                str(request.user.queuedb_profile_related.delegate.id)), 'ref')
        if not os.path.exists(user_ref_dir):
            try:
                os.makedirs(user_ref_dir)
            except OSError:
                return error('Fail to create your reference folder')
        # guard BOTH uses of target_files: the second, unconditional access
        # previously raised KeyError when the key was absent
        if "target_files" in ref_info.keys():
            target_files = ref_info["target_files"].split(";")
            mv_parameter = " ".join(target_files)
        else:
            target_files = []
            mv_parameter = "{{LastOutput}}"
        job = Job(
            protocol_id=protocol_parent.id,
            parameter='UserRef=%s;CompileTargets=%s' %
            (user_ref_dir, mv_parameter),
            run_dir=get_config('env', 'workspace'),
            user_id=request.user.queuedb_profile_related.delegate.id,
            input_file=ref_info["url"],
        )
        # create Reference records pointing at the files the job will install
        ref_list = list()
        for target_file in target_files:
            ref_list.append(
                Reference(
                    name=ref_info['name'],
                    path=os.path.join(user_ref_dir, target_file),
                    description=ref_info['description'],
                    user_id=request.user.queuedb_profile_related.delegate.id,
                ))
        if len(ref_list) > 0:
            Reference.objects.bulk_create(ref_list)
        try:
            job.save()
            return success('Push the task into job queue.')
        except Exception:
            return error('Fail to save the job.')
    else:
        api_bus = get_config('program', 'ref_repo_search_api', 1)
        tool_addr = get_config('program', 'ref_repo', 1)
        return render(request, 'ui/install_ref.html', {
            'ab': api_bus,
            'ta': tool_addr
        })
Beispiel #24
0
def fetch_data(request):
    """Queue a job that downloads sequencing data from EBI ENA.

    POST with an ``acc`` accession: makes sure the shared
    "Fetch Data From EBI ENA" protocol exists (building it from the
    ``download`` and ``gunzip`` maintenance protocols on first use),
    resolves the accession into download links via
    ``query_download_link_from_ebi`` and queues one job that fetches
    them all into the user's uploads folder.  GET renders the page.
    """
    if request.method == "POST":
        from ..tools import get_maintenance_protocols
        protocol_name = "Fetch Data From EBI ENA"
        try:
            protocol_record = ProtocolList.objects.get(name=protocol_name,
                                                       user_id=0)
        except ProtocolList.DoesNotExist:
            # shared protocol (user_id=0) missing: build it once
            protocol_parent = ProtocolList(name=protocol_name, user_id=0)
            protocol_parent.save()
            steps = list()
            maintenance_protocols = get_maintenance_protocols()
            # download
            step_order = 1
            if "download" not in maintenance_protocols:
                protocol_parent.delete()
                return error("No protocol to fetch the data")
            else:
                model = __import__("ui.maintenance_protocols.download",
                                   fromlist=["download"])
                step_order, sub_steps = model.get_sub_protocol(
                    Step, protocol_parent, step_order)
                for sub_step in sub_steps:
                    steps.append(sub_step)
            # decompress
            if "gunzip" not in maintenance_protocols:
                protocol_parent.delete()
                return error("No protocol to decompress (gz) the data")
            else:
                model = __import__("ui.maintenance_protocols.gunzip",
                                   fromlist=["gunzip"])
                step_order, sub_steps = model.get_sub_protocol(
                    Step, protocol_parent, step_order)
                for sub_step in sub_steps:
                    steps.append(sub_step)
            # move to user's uploads folder
            steps.append(
                Step(software="mv",
                     parameter="{{LastOutput}} {{UserUploads}}",
                     parent=protocol_parent,
                     user_id=0,
                     hash='c5f8bf22aff4c9fd06eb0844e6823d5f',
                     step_order=step_order))
            try:
                Step.objects.bulk_create(steps)
                protocol_record = protocol_parent
            except Exception:  # narrowed from a bare except
                protocol_parent.delete()
                return error('Fail to save the protocol.')
        user_upload_dir = os.path.join(
            get_config('env', 'workspace'),
            str(request.user.queuedb_profile_related.delegate.id), 'uploads')
        if not os.path.exists(user_upload_dir):
            try:
                os.makedirs(user_upload_dir)
            except OSError:  # narrowed from a bare except
                return error('Fail to create your uploads folder')
        from ..ena import query_download_link_from_ebi
        links = query_download_link_from_ebi(request.POST["acc"])
        if len(links) > 0:
            job = Job(
                job_name=request.POST["acc"],
                protocol_id=protocol_record.id,
                parameter='UserUploads=%s;' % user_upload_dir,
                run_dir=get_config('env', 'workspace'),
                user=request.user.queuedb_profile_related.delegate,
                input_file=";".join(links),
            )
            try:
                job.save()
                return success("<br>".join(links))
            except Exception:
                return error('Fail to save the job.')
        else:
            return error("No result found.")
    else:
        return render(request, 'ui/fetch_data.html')
Beispiel #25
0
def settings(request):
    """Show and update instance-wide BioQueue configuration.

    POST: persists environment, machine-learning, mail and cluster
    settings from the submitted form, then redirects back to this page.
    GET: renders the settings page populated with current values and
    disk usage figures.
    """
    if request.method == 'POST':
        set_config('env', 'workspace', request.POST['path'])
        set_config('env', 'cpu', request.POST['cpu'])
        set_config('env', 'cpu_m', request.POST['cpu_m'])
        set_config('env', 'memory', request.POST['mem'])
        set_config('env', 'disk_quota', request.POST['dquota'])
        set_config('ml', 'confidence_weight_disk', request.POST['dcw'])
        set_config('ml', 'confidence_weight_mem', request.POST['mcw'])
        set_config('ml', 'confidence_weight_cpu', request.POST['ccw'])
        set_config('ml', 'threshold', request.POST['ccthr'])

        # an empty mail host disables notifications entirely
        if request.POST['mailhost'] != '':
            set_config('mail', 'notify', 'on')
            set_config('mail', 'mail_host', request.POST['mailhost'])
            set_config('mail', 'mail_port', request.POST['mailport'])
            set_config('mail', 'mail_user', request.POST['mailuser'])
            set_config('mail', 'mail_password', request.POST['mailpassword'])
            # write BOTH flags: previously switching between ssl and tls
            # left the other flag's stale 'true' in the config, and the
            # GET branch (which checks ssl first) then reported the
            # wrong protocol
            set_config('mail', 'ssl',
                       'true' if request.POST['protocol'] == 'ssl' else 'false')
            set_config('mail', 'tls',
                       'true' if request.POST['protocol'] == 'tls' else 'false')
        else:
            set_config('mail', 'notify', 'off')

        # an empty cluster type means jobs run on the local machine
        if request.POST['cluster_type'] != '':
            set_config('cluster', 'type', request.POST['cluster_type'])
            set_config('cluster', 'cpu', request.POST['job_cpu'])
            set_config('cluster', 'queue', request.POST['job_dest'])
            set_config('cluster', 'mem', request.POST['job_mem'])
            set_config('cluster', 'vrt', request.POST['job_vrt'])
            set_config('cluster', 'walltime', request.POST['job_wt'])
        else:
            set_config('cluster', 'type', '')
            set_config('cluster', 'cpu', '')
            set_config('cluster', 'queue', '')
            set_config('cluster', 'mem', '')
            set_config('cluster', 'vrt', '')
            set_config('cluster', 'walltime', '')

        return HttpResponseRedirect('/ui/settings')
    else:
        from worker.cluster_support import get_cluster_models
        try:
            if get_config('mail', 'ssl') == 'true':
                mail_protocol = 'ssl'
            elif get_config('mail', 'tls') == 'true':
                mail_protocol = 'tls'
            else:
                mail_protocol = 'nm'

            gib = 1073741824  # bytes per GiB, for the disk figures below
            workspace = get_config('env', 'workspace')
            free_bytes = get_disk_free(workspace)
            configuration = {
                'run_folder': workspace,
                'cpu': get_config('env', 'cpu'),
                'cpu_m': get_config('env', 'cpu_m'),
                'memory': get_config('env', 'memory'),
                'disk_quota': get_config('env', 'disk_quota'),
                'threshold': get_config('ml', 'threshold'),
                'disk_confidence_weight':
                    get_config('ml', 'confidence_weight_disk'),
                'mem_confidence_weight':
                    get_config('ml', 'confidence_weight_mem'),
                'cpu_confidence_weight':
                    get_config('ml', 'confidence_weight_cpu'),
                'max_disk':
                    round((free_bytes + get_disk_used(workspace)) / gib),
                'free_disk': round(free_bytes / gib),
                'mail_host': get_config('mail', 'mail_host'),
                'mail_port': get_config('mail', 'mail_port'),
                'mail_user': get_config('mail', 'mail_user'),
                'mail_password': get_config('mail', 'mail_password'),
                'mail_protocol': mail_protocol,
                'cluster_models': get_cluster_models(),
                'cluster_type': get_config('cluster', 'type'),
                'job_cpu': get_config('cluster', 'cpu'),
                'job_dest': get_config('cluster', 'queue'),
                'job_mem': get_config('cluster', 'mem'),
                'job_vrt': get_config('cluster', 'vrt'),
                'job_wt': get_config('cluster', 'walltime'),
                'rv': get_config('program', 'latest_version', 1),
                'cv': get_bioqueue_version(),
            }
        except Exception as e:
            return render(request, 'ui/error.html', {'error_msg': e})

        return render(request, 'ui/settings.html', configuration)
Beispiel #26
0
def save_job(archive_id, logger):
    """Prepare a FileArchive for sharing.

    Computes MD5 sums for the archive's files and packs them into
    ``<archive id>.tar.gz`` under the owner's archive folder, reusing
    the md5 sums / tarball / remote file id from any previous archive of
    the same protocol version and file list.

    Returns ``False`` when the archive record does not exist, otherwise
    ``None``.
    """
    def file_md5(file_path):
        # stream in 4 KiB chunks so large files are not read into memory
        hash_md5 = hashlib.md5()
        with open(file_path, "rb") as fh:
            for chunk in iter(lambda: fh.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    try:
        archive = FileArchive.objects.get(id=archive_id)
        logger.info("Working on %s" % archive)
        try:
            # "ph" is a placeholder value; exclude records that never
            # finished computing their md5 sums
            prev_same_archive = FileArchive.objects.filter(
                protocol=archive.protocol,
                protocol_ver=archive.protocol_ver,
                files=archive.files).exclude(file_md5s="ph")
            # BUGFIX: the old format string had no placeholder, so this
            # line raised TypeError and the except branch silently
            # disabled the cache lookup on every call
            logger.info("# md5 caches: %s", len(prev_same_archive))
        except Exception:
            prev_same_archive = None
            logger.info("No md5 cache")
        md5_flag = 0
        tared_flag = 0
        uploaded_flag = 0
        # ``files`` is stored as a Python-repr-style list; normalize the
        # quotes so it parses as JSON
        files = json.loads(archive.files.replace("'", "\""))
        if prev_same_archive is not None:
            # copy whatever previous results exist (md5s, tarball path,
            # remote id) instead of recomputing them
            for psa in prev_same_archive:
                md5_flag = 1
                archive.file_md5s = psa.file_md5s
                if psa.archive_file != "ph":
                    archive.archive_file = psa.archive_file
                    tared_flag = 1
                if psa.file_id_remote != "ph":
                    archive.file_id_remote = psa.file_id_remote
                    uploaded_flag = 1
                archive.save()
        if not md5_flag:
            logger.info("Calculating MD5 sums")
            md5_sums = [file_md5(file) for file in files]
            archive.file_md5s = ",".join(md5_sums)
            archive.save()
        if not tared_flag:
            logger.info("Compressing files")
            import tarfile
            user_archives = archive.user.queuedb_profile_related.delegate.queuedb_profile_related.archive_folder
            if user_archives != "":
                if not os.path.exists(user_archives):
                    try:
                        os.makedirs(user_archives)
                    except Exception as e:
                        logger.exception(e)
                        # BUGFIX: previously assigned the dead name
                        # ``archive_folder``, so the workspace fallback
                        # below never kicked in after a failed makedirs
                        user_archives = ""
            if user_archives == "":
                # fall back to <workspace>/<user id>/archives
                user_archives = os.path.join(
                    get_config("env", "workspace"),
                    str(archive.user.queuedb_profile_related.delegate.id),
                    "archives")
            if not os.path.exists(user_archives):
                os.makedirs(user_archives)
            tan = "%s.tar.gz" % os.path.join(user_archives, str(archive.id))
            # context manager guarantees the tarball is closed even if
            # adding a member fails
            with tarfile.open(tan, mode="w:gz") as tf:
                for f in files:
                    tf.add(f, arcname=os.path.basename(f))
            archive.archive_file = tan
            archive.save()
        # NOTE(review): remote upload (uploaded_flag) was disabled
        # commented-out code; the flag is kept for the cache-copy logic
    except FileArchive.DoesNotExist as e:
        logger.error(e)
        return False
Beispiel #27
0
def register_sample(request):
    """Register uploaded files as a Sample bound to an Experiment.

    POST: validates an ``AddSampleForm`` and creates a ``Sample`` record
    for the current user, rejecting an identical registration.  When the
    user has configured a custom upload folder, symbolic links are
    created inside the default workspace uploads folder so downstream
    tools can still find the files.  Returns a JSON-style
    success/error response.

    GET: renders the registration page listing the user's workspace
    files (hiding files already registered), reference files and all
    experiments.
    """
    if request.method == 'POST':
        reg_form = AddSampleForm(request.POST)
        if reg_form.is_valid():
            cd = reg_form.cleaned_data
            try:
                exp = Experiment.objects.get(id=cd['experiment'])
                try:
                    # reject when this user already registered the exact
                    # same file_path value
                    Sample.objects.get(
                        user=request.user.queuedb_profile_related.delegate,
                        file_path=cd["file_path"])
                    return error("Sample already exist!")
                except Sample.DoesNotExist:
                    # if user prefers not to use the default upload folder
                    # then BioQueue will create symbolic links in the uploads
                    # folder for user
                    if request.user.queuedb_profile_related.upload_folder != "":
                        # first-order link: <workspace>/<uid>/OVERRIDE_UPLOAD
                        # points at the user's custom upload folder
                        first_order_soft_link = os.path.join(
                            get_config('env', 'workspace'),
                            str(request.user.queuedb_profile_related.delegate.
                                id), "OVERRIDE_UPLOAD")
                        if not os.path.exists(
                                first_order_soft_link) or not os.path.islink(
                                    first_order_soft_link):
                            os.symlink(
                                request.user.queuedb_profile_related.
                                upload_folder, first_order_soft_link)
                        expected_upload_path = os.path.join(
                            get_config('env', 'workspace'),
                            str(request.user.queuedb_profile_related.delegate.
                                id), "uploads")
                        # inner_path holds ';'-separated base64-encoded
                        # paths inside the custom upload folder
                        for file in cd["inner_path"].split(";"):
                            real_path = base64.b64decode(file).decode("utf-8")
                            dst_path = os.path.join(
                                expected_upload_path,
                                os.path.split(real_path)[1])

                            if not os.path.exists(dst_path):
                                # relative link routed through the
                                # OVERRIDE_UPLOAD hop created above
                                os.symlink(src=os.path.join(
                                    "../OVERRIDE_UPLOAD", real_path),
                                           dst=dst_path)
                            else:
                                return error(
                                    "This sample is already registered")

                    # file_path entries arrive base64-encoded; decode
                    # them before persisting
                    Sample(name=cd["name"],
                           file_path=";".join([
                               base64.b64decode(fp.encode()).decode()
                               for fp in cd["file_path"].split(";") if fp != ""
                           ]),
                           user=request.user.queuedb_profile_related.delegate,
                           experiment=exp,
                           attribute=cd["attribute"],
                           inner_path=cd["inner_path"]).save()
                    return success("Sample registered")
                except Sample.MultipleObjectsReturned:
                    return error("Duplicate records!")
            except Exception as e:
                return error(e)
        else:
            return error(str(reg_form.errors))
    else:
        # GET: collect inner paths of already-registered samples so the
        # file listing can hide them
        block_files = Sample.objects.filter(
            user=request.user.queuedb_profile_related.delegate)
        block_file_real = set()
        for bf in block_files:
            for rbf in bf.inner_path.split(";"):
                block_file_real.add(rbf)
        if request.user.queuedb_profile_related.upload_folder != "":
            ufs = show_workspace_files(
                request.user.queuedb_profile_related.delegate.id,
                request.user.queuedb_profile_related.upload_folder,
                block_files=block_file_real)
        else:
            ufs = show_workspace_files(
                request.user.queuedb_profile_related.delegate.id,
                'uploads',
                block_files=block_file_real)
        context = {
            'user_files':
            ufs,
            'user_ref_files':
            show_workspace_files(
                request.user.queuedb_profile_related.delegate.id, 'refs'),
            'experiments':
            Experiment.objects.filter(),
        }

        return render(request, 'ui/register_sample.html', context)