Example 1
def current_jobs(request):
    """
    Display status information about any job servers listed in
    ``settings.JOB_SERVERS`` (or the local job server if appropriate),
    as well as information about any jobs (reports) in progress.
    """
    jservers = [(socket.gethostname(), socket.gethostbyname(socket.gethostname()))]
    servers = []
    jobs = []
    for server_name, ip in jservers:
        short_name = "%s (%s)" % (server_name, ip)
        try:
            conn = client.connect(ip, settings.JOBSERVER_PORT)
            running = conn.running()
            uptime = seconds2htime(conn.uptime())
            nrunning = len(running)
            servers.append((server_name, ip, True, nrunning, uptime,))
            server_up = True
        except (socket.error, xmlrpclib.Fault):
            servers.append((server_name, ip, False, 0, 0,))
            server_up = False
        if server_up:
            runs = dict((r[2], r) for r in running)
            results = Results.objects.select_related('experiment').filter(pk__in=runs.keys()).order_by('pk')
            for result in results:
                name, pid, pk, atype, stat = runs[result.pk]
                jobs.append((short_name, name, pid, atype, stat,
                             result, result.experiment))
    ctxd = {"jobs": jobs, "servers": servers}
    return ctxd
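
Note: the client.connect helper used throughout these examples is defined elsewhere in the project and is not shown on this page. A minimal sketch of what such a helper might look like, assuming it is only a thin wrapper around xmlrpclib.ServerProxy (the name and signature are inferred from the calls above, not taken from the actual source):

import xmlrpclib

def connect(host, port):
    """Return an XML-RPC proxy for the job server at host:port (assumed implementation)."""
    uri = "http://%s:%d" % (host, int(port))
    # allow_none lets the server pass None values back without raising a Fault
    return xmlrpclib.ServerProxy(uri, allow_none=True)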
Example 2
File: ajax.py Project: alecw/TS
def analysis_liveness(request, pk):
    """Determine if an analysis has been successfully started.
    """
    try:
        pk = int(pk)
    except (TypeError, ValueError):
        return http.HttpResponseNotFound()

    result = shortcuts.get_object_or_404(models.Results, pk=pk)
    if settings.TEST_INSTALL:
        rl = path.join(path.dirname(result.reportLink), "ion_params_00.json")
    else:
        rl = result.reportLink
    url = urlparse.urlparse(rl)
    loc = result.server_and_location()
    web_path = result.web_path(loc)
    report = result.reportLink
    log = path.join(web_path, "log.html")
    save_path = result.web_root_path(loc)
    report_exists = result.report_exist()
    ip = "127.0.0.1"
    if ip is None:
        return http.HttpResponseNotFound("no job server found for %s" % loc.name)
    proxy = anaclient.connect(ip, 10000)
    # if the job is finished, it will be shown as 'failed'
    success, status = proxy.status(save_path, result.pk)
    return render_to_json({"success": success, "log": log, "report": report, "exists": report_exists, "status": status})
Example 3
def current_jobs():
    """
    Get list of running jobs from job server
    """
    jobs = []
    try:
        host = "127.0.0.1"
        conn = client.connect(host, settings.JOBSERVER_PORT)
        running = conn.running()
        runs = dict((r[2], r) for r in running)
        
        results = Results.objects.filter(pk__in=runs.keys()).order_by('pk')
        for result in results:
            name, pid, pk, atype, stat = runs[result.pk]
            jobs.append({
                'name': name,
                'resultsName': result.resultsName,
                'pid': pid,
                'type': 'analysis',
                'status': stat,
                'pk': result.pk,
                'report_exist': result.report_exist(),
                'report_url': reverse('report', args=(pk,)),
                'term_url': reverse('control_job', args=(pk,'term'))
            })
    except (socket.error, xmlrpclib.Fault):
        pass

    return jobs
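
For context, a view built on this helper might simply serialize the returned list. A hypothetical usage (not part of the original file), reusing the render_to_json helper seen in the other examples:

def current_jobs_view(request):
    # hypothetical wrapper: expose the running-job list as JSON
    return render_to_json({"jobs": current_jobs()})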
Example 4
def UpdatePluginStatus(pk, plugin, version, msg, host="127.0.0.1", port=settings.IPLUGIN_PORT, method="update"):
    """
    Updates a plugin's status through XML-RPC
    """
    # Retry transient connection errors before giving up
    retries = 10
    delay = 5
    # Total timeout = 10 * 5 == 50 seconds
    attempts = 0

    while attempts < retries:
        try:
            conn = client.connect(host, port)
            methodAttach = getattr(conn, method)
            ret = methodAttach(pk, plugin, msg)
            print "Plugin Update returns: '%s'" % ret
            print "Plugin '%s' Successfully Updated" % plugin
            return ret
        except (socket.error, xmlrpclib.Fault, xmlrpclib.ProtocolError, xmlrpclib.ResponseError) as f:
            print "XMLRPC Error: %s" % f
            attempts += 1
            if attempts < retries:
                print "Error connecting to plugin daemon at %s:%d. Retrying in %d" % (host, port, delay)
                time.sleep(delay)

    raise Exception("Unable to connect to plugin daemon after %d attempts" % retries)
Example 5
    def callPluginXMLRPC(self, start_json, host="127.0.0.1", port=settings.IPLUGIN_PORT):

        # Get Fault so we can catch errors
        retries = 60
        delay = 5
        # Total timeout = 60*5 == 300 == 5min
        attempts = 0

        log = logging.getLogger(__name__)
        log.propagate = False
        log.info("XMLRPC call to '%s':'%d'", host, port)

        while attempts < retries:
            try:
                conn = client.connect(host,port)
                ret = conn.pluginStart(start_json)
                log.info("Plugin %s Queued '%s'",
                         start_json["runinfo"]["plugin_name"], ret)
                return ret
            except (socket.error, xmlrpclib.Fault, xmlrpclib.ProtocolError, xmlrpclib.ResponseError):
                log.exception("XMLRPC Error")
                log.warn("Error connecting to plugin daemon at %s:%d. Retrying in %d", host, port, delay)
                time.sleep(delay)
                attempts += 1
        # Exceeded number of retry attempts
        return False
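
Examples 4 and 5 hand-roll the same connect/call/retry loop. A generic sketch of that pattern factored into one helper (hedged: the exception tuple is carried over from the examples above, and the connection factory is passed in rather than assumed):

import time
import socket
import xmlrpclib

def call_with_retries(connect, host, port, method_name, args=(), retries=10, delay=5):
    """Call an XML-RPC method via connect(host, port), retrying transient errors."""
    for attempt in range(retries):
        try:
            conn = connect(host, port)
            return getattr(conn, method_name)(*args)
        except (socket.error, xmlrpclib.Fault, xmlrpclib.ProtocolError, xmlrpclib.ResponseError):
            if attempt + 1 < retries:
                time.sleep(delay)
    raise Exception("Unable to reach %s:%d after %d attempts" % (host, port, retries))

With such a helper, Example 4 reduces to roughly call_with_retries(client.connect, host, port, method, (pk, plugin, msg)).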
Example 6
def update_files_in_use():
    """
    Updates DMFileStats files_in_use (used in dmactions.action_validation to block file removal)
    Contacts jobserver to retrieve currently active analysis jobs.
    Also adds any results with active plugins.
    """
    old_in_use = DMFileStat.objects.exclude(files_in_use='')

    conn = client.connect(settings.JOBSERVER_HOST, settings.JOBSERVER_PORT)
    running = conn.running()
    active = []
    for item in running:
        try:
            result = Results.objects.get(pk=item[2])
        except ObjectDoesNotExist:
            logger.warn("Results object does not exist: %d" % (item[2]),
                        extra=logid)
            continue
        # check the status - completed results still show up on the running list for a short time
        if result.status == 'Completed' or result.status == 'TERMINATED':
            continue

        # update files_in_use for current result
        msg = "%s (%s), status = %s" % (result.resultsName, result.id,
                                        result.status)
        dmfilestats = result.dmfilestat_set.all()
        dmfilestats.update(files_in_use=msg)
        active.extend(dmfilestats.values_list('pk', flat=True))

        # update files_in_use for related sigproc dmfilestats
        dmfilestats = DMFileStat.objects.filter(
            dmfileset__type=dmactions_types.SIG,
            result__experiment_id=result.experiment_id).exclude(
                result__pk=result.pk)
        dmfilestats.update(files_in_use=msg)
        active.extend(dmfilestats.values_list('pk', flat=True))

    # Add reports that have plugins currently running
    # consider only plugins with state='Started' and starttime within a day
    timerange = datetime.now(pytz.UTC) - timedelta(days=1)
    pluginresults = PluginResultJob.objects.filter(state='Started',
                                                   starttime__gt=timerange)
    for pk in set(
            pluginresults.values_list('plugin_result__result__pk', flat=True)):
        result = Results.objects.get(pk=pk)
        dmfilestats = result.dmfilestat_set.all()
        msg = "plugins running on %s (%s)" % (result.resultsName, result.id)
        dmfilestats.update(files_in_use=msg)
        active.extend(dmfilestats.values_list('pk', flat=True))

    # reset files_in_use to blank for any non-active dmfilestats
    num = old_in_use.exclude(pk__in=active).update(files_in_use='')
    logger.debug(
        'update_files_in_use(): %s dmfilestats in use by active jobs, %s released for dmactions'
        % (len(active), num),
        extra=logid)
Example 7
def get_servers():
    jservers = [(socket.gethostname(), socket.gethostbyname(socket.gethostname()))]
    servers = []
    for server_name, ip in jservers:
        try:
            conn = client.connect(ip, settings.JOBSERVER_PORT)
            nrunning = conn.n_running()
            uptime = seconds2htime(conn.uptime())
            servers.append((server_name, ip, True, nrunning, uptime,))
        except (socket.error, xmlrpclib.Fault):
            servers.append((server_name, ip, False, 0, 0,))
    return servers
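
The seconds2htime helper used in Examples 1 and 7 is also defined elsewhere; a plausible minimal sketch (the real implementation may format the string differently):

def seconds2htime(seconds):
    """Convert an uptime in seconds to a human-readable 'Nd HH:MM:SS' string (assumed format)."""
    seconds = int(seconds)
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    return "%dd %02d:%02d:%02d" % (days, hours, minutes, seconds)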
Example 8
File: ajax.py Project: golharam/TS
def control_job(request, pk, signal):
    """Send ``signal`` to the job denoted by ``pk``, where ``signal``
    is one of
    
    * ``"term"`` - terminate (permanently stop) the job.
    * ``"stop"`` - stop (pause) the job.
    * ``"cont"`` - continue (resume) the job.
    """
    pk = int(pk)
    if signal not in set(("term", "stop", "cont")):
        return http.HttpResponseNotFound("No such signal")
    result = shortcuts.get_object_or_404(models.Results, pk=pk)
    loc = result.server_and_location()
    ip = '127.0.0.1'  # assume webserver and jobserver are on the same appliance
    conn = anaclient.connect(ip, settings.JOBSERVER_PORT)
    if signal == "term":
        result.status = 'TERMINATED'
        result.save()
    return render_to_json(conn.control_job(pk, signal))
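
render_to_json appears in several of these views but is defined elsewhere in the project. A minimal sketch of a Django helper with that behaviour (an assumption, not the project's actual implementation):

import json

from django import http

def render_to_json(data):
    """Serialize data to JSON and wrap it in an HttpResponse (assumed helper)."""
    return http.HttpResponse(json.dumps(data), content_type="application/json")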
Example 9
File: ajax.py Project: dkeren/TS
def analysis_liveness(request, pk):
    """Determine if an analysis has been successfully started.
    """
    try:
        pk = int(pk)
    except (TypeError, ValueError):
        return http.HttpResponseNotFound()

    result = shortcuts.get_object_or_404(models.Results, pk=pk)
    if settings.TEST_INSTALL:
        rl = path.join(path.dirname(result.reportLink), "ion_params_00.json")
    else:
        rl = result.reportLink

    url = urlparse.urlparse(rl)
    loc = result.server_and_location()

    web_path = result.web_path(loc)
    #    report = result.reportLink
    report = "/report/%s" % pk
    save_path = result.web_root_path(loc)

    if web_path:
        log = reverse("report_log", args=(pk, ))
        report_exists = result.report_exist()
    else:
        log = False
        report_exists = False

    ip = "127.0.0.1"
    if ip is None:
        return http.HttpResponseNotFound("no job server found for %s" %
                                         loc.name)
    proxy = anaclient.connect(ip, 10000)
    #if the job is finished, it will be shown as 'failed'
    success, status = proxy.status(save_path, result.pk)
    return render_to_json({
        "success": success,
        "log": log,
        "report": report,
        "exists": report_exists,
        "status": status
    })
Example 10
def update_files_in_use():
    """
    Updates DMFileStats files_in_use (used in dmactions.action_validation to block file removal)
    Contacts jobserver to retrieve currently active analysis jobs.
    Also adds any results with active plugins.
    """
    old_in_use = DMFileStat.objects.exclude(files_in_use='')

    conn = client.connect(settings.JOBSERVER_HOST, settings.JOBSERVER_PORT)
    running = conn.running()
    active = []
    for item in running:
        try:
            result = Results.objects.get(pk=item[2])
        except ObjectDoesNotExist:
            logger.warn("Results object does not exist: %d" % (item[2]), extra=logid)
            continue
        # check the status - completed results still show up on the running list for a short time
        if result.status == 'Completed' or result.status == 'TERMINATED':
            continue
        # set files_in_use for current result plus any related sigproc dmfilestats
        dmfilestats = result.dmfilestat_set.all() | DMFileStat.objects.filter(
            dmfileset__type=dmactions_types.SIG, result__experiment_id=result.experiment_id)
        msg = "%s (%s), status = %s" % (result.resultsName, result.id, result.status)
        dmfilestats.update(files_in_use=msg)
        active.extend(dmfilestats.values_list('pk', flat=True))

    # Add reports that have plugins currently running
    # consider only plugins with state='Started' and starttime within a day
    timerange = datetime.now(pytz.UTC) - timedelta(days=1)
    pluginresults = PluginResultJob.objects.filter(state='Started', starttime__gt=timerange)
    for pk in set(pluginresults.values_list('plugin_result__result__pk', flat=True)):
        result = Results.objects.get(pk=pk)
        dmfilestats = result.dmfilestat_set.all()
        msg = "plugins running on %s (%s)" % (result.resultsName, result.id)
        dmfilestats.update(files_in_use=msg)
        active.extend(dmfilestats.values_list('pk', flat=True))

    # reset files_in_use to blank for any non-active dmfilestats
    num = old_in_use.exclude(pk__in=active).update(files_in_use='')
    logger.debug('update_files_in_use(): %s dmfilestats in use by active jobs, %s released for dmactions' %
                 (len(active), num), extra=logid)
Example 11
def launch_analysis_job(result, params, doThumbnail):
    ''' Create files and send to jobServer '''

    def create_tf_conf():
        """
        Build the contents of the report TF file (``DefaultTFs.conf``)
        """
        fname = "DefaultTFs.conf"
        tfs = models.Template.objects.filter(isofficial=True).order_by('name')
        lines = ["%s,%s,%s" % (tf.name, tf.key, tf.sequence,) for tf in tfs]

        return (fname, "\n".join(lines))

    def create_bc_conf(barcodeId, fname):
        """
        Creates a barcodeList file for use in barcodeSplit binary.

        Danger here is if the database returns a blank, or no lines, then the
        file will be written with no entries.  The use of this empty file later
        will generate no fastq files, except for the nomatch.fastq file.

        See C source code BarCode.h for list of valid keywords
        """
        # Retrieve the list of barcodes associated with the given barcodeId
        db_barcodes = models.dnaBarcode.objects.filter(name=barcodeId).order_by("index")
        lines = []
        for db_barcode in db_barcodes:
            lines.append('barcode %d,%s,%s,%s,%s,%s,%d,%s' % (db_barcode.index, db_barcode.id_str, db_barcode.sequence, db_barcode.adapter, db_barcode.annotation, db_barcode.type, db_barcode.length, db_barcode.floworder))
        if db_barcodes:
            lines.insert(0, "file_id %s" % db_barcodes[0].name)
            lines.insert(1, "score_mode %s" % str(db_barcodes[0].score_mode))
            lines.insert(2, "score_cutoff %s" % str(db_barcodes[0].score_cutoff))
        return (fname, "\n".join(lines))

    def create_pk_conf(pk):
        """
        Build the contents of the report primary key file (``primary.key``).
        """
        text = "ResultsPK = %d" % pk
        return ("primary.key", text)

    def create_meta(experiment, result):
        """Build the contents of a report metadata file (``expMeta.dat``)."""
        def get_chipcheck_status(exp):
            """
            Load the explog stored in the log field in the experiment
            table into a python dict.  Check if `calibratepassed` is set
            """
            data = exp.log
            if data.get('calibratepassed', 'Not Found'):
                return 'Passed'
            else:
                return 'Failed'

        lines = ("Run Name = %s" % experiment.expName,
                 "Run Date = %s" % experiment.date,
                 "Run Flows = %s" % experiment.flows,
                 "Project = %s" % ','.join(p.name for p in result.projects.all()),
                 "Sample = %s" % experiment.get_sample(),
                 "Library = N/A",
                 "Reference = %s" % result.eas.reference,
                 "Instrument = %s" % experiment.pgmName,
                 "Flow Order = %s" % (experiment.flowsInOrder.strip() if experiment.flowsInOrder.strip() != '0' else 'TACG'),
                 "Library Key = %s" % result.eas.libraryKey,
                 "TF Key = %s" % result.eas.tfKey,
                 "Chip Check = %s" % get_chipcheck_status(experiment),
                 "Chip Type = %s" % experiment.chipType,
                 "Chip Data = %s" % experiment.rawdatastyle,
                 "Notes = %s" % experiment.notes,
                 "Barcode Set = %s" % result.eas.barcodeKitName,
                 "Analysis Name = %s" % result.resultsName,
                 "Analysis Date = %s" % date.today(),
                 "Analysis Flows = %s" % result.processedflows,
                 "runID = %s" % result.runid,
                 )

        return ('expMeta.dat', '\n'.join(lines))

    # Default control script definition
    scriptname = 'TLScript.py'

    from distutils.sysconfig import get_python_lib
    python_lib_path = get_python_lib()
    scriptpath = os.path.join(python_lib_path, 'ion/reports', scriptname)
    try:
        with open(scriptpath, "r") as f:
            script = f.read()
    except Exception as error:
        raise Exception("Error reading %s\n%s" % (scriptpath, error.args))

    # test job server connection
    webRootPath = result.get_report_path()
    try:
        host = "127.0.0.1"
        conn = client.connect(host, settings.JOBSERVER_PORT)
        to_check = os.path.dirname(webRootPath)
    except (socket.error, xmlrpclib.Fault):
        raise Exception("Failed to contact job server.")

    # the following files will be written into result's directory
    files = []
    files.append(create_tf_conf())          # DefaultTFs.conf
    files.append(create_meta(result.experiment, result))  # expMeta.dat
    files.append(create_pk_conf(result.pk))  # primary.key
    # barcodeList.txt
    barcodeKitName = result.eas.barcodeKitName
    if barcodeKitName:
        files.append(create_bc_conf(barcodeKitName, "barcodeList.txt"))

    try:
        chips = models.Chip.objects.all()
        chip_dict = dict((c.name, '-pe ion_pe %s' % str(c.slots)) for c in chips)
    except:
        chip_dict = {}  # just in case we can't read from the db

    try:
        ts_job_type = 'thumbnail' if doThumbnail else ''
        conn.startanalysis(result.resultsName, script, params, files,
                           webRootPath, result.pk, result.experiment.chipType, chip_dict, ts_job_type)
    except (socket.error, xmlrpclib.Fault):
        raise Exception("Failed to contact job server.")
Example 12
def new_genome(request):
    """This is the page to create a new genome. The XML-RPC server is ionJobServer.
    """

    if request.method=="POST":
        """parse the data sent in"""

        #required
        name = request.POST.get('name',False)
        short_name = request.POST.get('short_name',False)
        fasta = request.POST.get('target_file',False)
        version = request.POST.get('version',False)
        notes = request.POST.get('notes',"")

        #optional
        read_sample_size = request.POST.get('read_sample_size',False)
        read_exclude_length = request.POST.get('read_exclude_length',False)

        #URL download
        url = request.POST.get('url',False)

        #if any of those were false send back a failed message
        if not all((name, short_name, fasta, version)):
            return render_to_json({"status":"Form validation failed","error":True})

        if not set(short_name).issubset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"):
            return render_to_json( {"status":"The short name has invalid characters. The valid values are letters, numbers, and underscores.","error": True} )

        #TODO: check to make sure the zip file only has one fasta or fa

        path = "/results/referenceLibrary/temp/"
        if not url:
            # check to ensure the size on the OS is the same as reported.
            reported_file_size = request.POST.get('reported_file_size',False)

            try:
                uploaded_file_size = str(os.path.getsize(path + fasta))
            except OSError:
                return render_to_json( {"status":"The FASTA temporary files was not found","error":True} )

            if reported_file_size != uploaded_file_size :
                try:
                    os.remove(path + fasta)
                except OSError:
                    return render_to_json( {"status":"The FASTA temporary file did not match the expected size, and could not be deleted.","error":True} )
                return render_to_json( {"status":
                                        "The file you uploaded differs from the expected size. This is due to an error uploading.  The temporary file has been removed.",
                                        "reported": reported_file_size,
                                        "uploaded": uploaded_file_size,
                                        "error" : True
                } )

        # Make a genome ref object
        if models.ReferenceGenome.objects.filter(short_name=short_name,index_version=settings.TMAP_VERSION):
            #check to see if the genome already exists in the database with the same version
            return render_to_json({"status":"Failed - Genome with this short name and index version already exist.","error":True})
        rg = models.ReferenceGenome()
        rg.name = name
        rg.short_name = short_name
        rg.version = version
        rg.date = datetime.datetime.now()
        rg.notes = notes
        rg.status = "queued"
        rg.enabled = False
        rg.index_version = settings.TMAP_VERSION

        #before the object is saved we should ping the xml-rpc server to see if it is alive.
        try:
            host = "127.0.0.1"
            conn = client.connect(host,settings.JOBSERVER_PORT)
            #just check uptime to make sure the call does not fail
            conn.uptime()
        except (socket.error,xmlrpclib.Fault):
            return render_to_json( {"status":"Unable to connect to ionJobserver process.  You may need to restart ionJobserver","error":True} )

        #if the above didn't fail then we can save the object
        #this object must be saved before the tmap call is made
        rg.save()

        #kick off the anaserve tmap xmlrpc call
        import traceback
        try:
            host = "127.0.0.1"
            conn = client.connect(host,settings.JOBSERVER_PORT)
            tmap_bool, tmap_status = conn.tmap(str(rg.id), fasta, short_name, name, version, read_sample_size, read_exclude_length, settings.TMAP_VERSION)
        except (socket.error,xmlrpclib.Fault):
            # delete the genome object, because it was not successful
            rg.delete()
            return render_to_json( {"status":"Error with index creation", "error" : traceback.format_exc() } )

        if not tmap_bool:
            rg.delete()
            return render_to_json( {"status":tmap_status,"error":True} )

        return render_to_json({"status":"The genome index is being created.  This might take a while, check the status on the references tab. You are being redirected there now.","error":False})

    elif request.method=="GET":

        ctxd = {}
        ctx = template.RequestContext(request, ctxd)

        #when we get a POST that data should be validated and go to the xmlrpc process
        return shortcuts.render_to_response("rundb/ion_new_genome.html", context_instance=ctx)
Example 13
def _combine_results_sendto_project(project_pk, json_data, username=""):
    project = Project.objects.get(id=project_pk)
    projectName = project.name

    name = json_data["name"]
    mark_duplicates = json_data["mark_duplicates"]
    ids_to_merge = json_data["selected_pks"]

    # check reference and flow order the same in all selected results
    for pk in ids_to_merge:
        result = Results.objects.get(pk=pk)
        if pk == ids_to_merge[0]:
            reference = result.reference
            floworder = result.experiment.flowsInOrder
        elif not reference == result.reference:
            raise Exception("Selected results do not have the same Alignment Reference.")
        elif not floworder == result.experiment.flowsInOrder:
            floworder = ""

    # create new entry in DB for combined Result
    delim = ":"
    filePrefix = "CombineAlignments"  # this would normally be Experiment name that's prefixed to all filenames
    result = create_combined_result("CA_%s_%s" % (name, projectName))
    result.projects.add(project)
    result.resultsType = "CombinedAlignments"
    result.parentIDs = delim + delim.join(ids_to_merge) + delim
    result.reference = reference
    result.sffLink = path.join(result.reportLink, "%s_%s.sff" % (filePrefix, result.resultsName))
    result.save()

    # gather parameters to pass to merging script
    links = []
    bams = []
    names = []
    plan = {}
    parents = Results.objects.filter(id__in=ids_to_merge).order_by("-timeStamp")
    for parent in parents:
        links.append(parent.reportLink)
        names.append(parent.resultsName)
        # splitext avoids rstrip(".sff") stripping extra trailing '.', 's', or 'f' characters from the name
        bamFile = path.splitext(path.split(parent.sffLink)[1])[0] + ".bam"
        bams.append(path.join(parent.get_report_dir(), bamFile))

        # need Plan info for VariantCaller etc. plugins: but which plan to use??
        try:
            planObj = [parent.experiment.plan]
            plan_json = serializers.serialize("json", planObj)
            plan_json = json.loads(plan_json)
            plan = plan_json[0]["fields"]
        except:
            pass

    try:
        genome = models.ReferenceGenome.objects.all().filter(
            short_name=reference, index_version=settings.TMAP_VERSION, enabled=True
        )[0]
        if path.exists(genome.info_text()):
            genomeinfo = genome.info_text()
        else:
            genomeinfo = ""
    except:
        genomeinfo = ""

    params = {
        "resultsName": result.resultsName,
        "parentIDs": ids_to_merge,
        "parentNames": names,
        "parentLinks": links,
        "parentBAMs": bams,
        "libraryName": result.reference,
        "tmap_version": settings.TMAP_VERSION,
        "mark_duplicates": mark_duplicates,
        "plan": plan,
        "run_name": filePrefix,
        "genomeinfo": genomeinfo,
        "warnings": validate_results_to_combine(parents),
    }

    scriptpath = "/usr/lib/python2.6/dist-packages/ion/reports/combineReports.py"

    try:
        with open(scriptpath, "r") as f:
            script = f.read()
    except Exception as error:
        result.status = "Error reading %s\n%s" % (scriptpath, error.args)
        raise Exception(result.status)

    files = []
    # do we need expMeta?
    lines = (
        "Project = %s" % ",".join(p.name for p in result.projects.all()),
        "Library = %s" % result.reference,
        "Analysis Name = %s" % result.resultsName,
        "Flow Order = %s" % floworder,
        "Run Name = %s" % filePrefix,
    )
    files.append(("expMeta.dat", "\n".join(lines)))
    files.append(("primary.key", "ResultsPK = %s" % result.pk))

    webRootPath = result.web_root_path(_location())

    try:
        host = "127.0.0.1"
        conn = client.connect(host, settings.JOBSERVER_PORT)
        conn.startanalysis(
            result.resultsName, script, params, files, webRootPath, result.pk, "", {}, "combineAlignments"
        )
    except:
        result.status = "Failed to contact job server."
        raise Exception(result.status)

    # log project history
    message = "Combine results %s into name= %s (%s), auto-assign to project name= %s (%s)." % (
        ids_to_merge,
        result.resultsName,
        result.pk,
        projectName,
        project_pk,
    )
    EventLog.objects.add_entry(project, message, username)

    return result
Example 14
File: genomes.py Project: aidjek/TS
def new_genome(request):
    """This is the page to create a new genome. The XML-RPC server is ionJobServer.
    """

    if request.method == "POST":
        # parse the data sent in

        #required
        name = request.POST.get('name', False)
        short_name = request.POST.get('short_name', False)
        fasta = request.POST.get('target_file', False)
        version = request.POST.get('version', False)
        notes = request.POST.get('notes', "")

        #optional
        read_exclude_length = request.POST.get('read_exclude_length', False)

        #URL download
        url = request.POST.get('url', False)

        error_status = ""
        reference_path = REFERENCE_LIBRARY_TEMP_DIR + fasta

        why_delete = ""

        #if any of those were false send back a failed message
        if not all((name, short_name, fasta, version)):
            return render_to_json({
                "status": "Form validation failed",
                "error": True
            })

        if not set(short_name).issubset(
                "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
        ):
            return render_to_json({
                "status":
                "The short name has invalid characters. The valid values are letters, numbers, and underscores.",
                "error": True
            })

        #TODO: check to make sure the zip file only has one fasta or fa
        if not url:
            # check to ensure the size on the OS is the same as reported.
            reported_file_size = request.POST.get('reported_file_size', False)

            try:
                uploaded_file_size = str(os.path.getsize(reference_path))
            except OSError:
                return render_to_json({
                    "status": "The FASTA temporary files was not found",
                    "error": True
                })

            if reported_file_size != uploaded_file_size:
                why_delete = "The file you uploaded differs from the expected size. This is due to an error uploading."

            if not (fasta.lower().endswith(".fasta")
                    or fasta.lower().endswith(".zip")):
                why_delete = "The file you uploaded does not have a .fasta or .zip extension.  It must be a plain text fasta file or a Zip compressed fasta."

            if why_delete:
                try:
                    os.remove(reference_path)
                except OSError:
                    why_delete += " The FASTA file could not be deleted."
                return render_to_json({"status": why_delete, "error": True})

        # Make a genome ref object
        if ReferenceGenome.objects.filter(short_name=short_name,
                                          index_version=settings.TMAP_VERSION):
            #check to see if the genome already exists in the database with the same version
            return render_to_json({
                "status":
                "Failed - Genome with this short name and index version already exist.",
                "error": True
            })
        ref_genome = ReferenceGenome()
        ref_genome.name = name
        ref_genome.short_name = short_name
        ref_genome.version = version
        ref_genome.date = datetime.datetime.now()
        ref_genome.notes = notes
        ref_genome.status = "queued"
        ref_genome.enabled = False
        ref_genome.index_version = settings.TMAP_VERSION

        #before the object is saved we should ping the xml-rpc server to see if it is alive.
        try:
            conn = client.connect(JOBSERVER_HOST, settings.JOBSERVER_PORT)
            #just check uptime to make sure the call does not fail
            conn.uptime()
            logger.debug('Connected to ionJobserver process.')
        except (socket.error, xmlrpclib.Fault):
            return render_to_json({
                "status":
                "Unable to connect to ionJobserver process.  You may need to restart ionJobserver",
                "error": True
            })

        #if the above didn't fail then we can save the object
        #this object must be saved before the tmap call is made
        ref_genome.save()
        logger.debug('Saved ReferenceGenome %s' % ref_genome.__dict__)

        #kick off the anaserve tmap xmlrpc call
        import traceback
        try:
            conn = client.connect(JOBSERVER_HOST, settings.JOBSERVER_PORT)
            tmap_bool, tmap_status = conn.tmap(str(ref_genome.id), fasta,
                                               short_name, name, version,
                                               read_exclude_length,
                                               settings.TMAP_VERSION)
            logger.debug('ionJobserver process reported %s %s' %
                         (tmap_bool, tmap_status))
        except (socket.error, xmlrpclib.Fault):
            # delete the genome object, because it was not successful
            ref_genome.delete()
            return render_to_json({
                "status": "Error with index creation",
                "error": traceback.format_exc()
            })

        if not tmap_bool:
            ref_genome.delete()
            return render_to_json({"status": tmap_status, "error": True})

        return render_to_json({
            "status":
            "The genome index is being created.  This might take a while, check the status on the references tab. \
                                You are being redirected there now.",
            "error": False
        })

    elif request.method == "GET":
        ctx = RequestContext(request, {})
        return render_to_response(
            "rundb/configure/modal_references_new_genome.html",
            context_instance=ctx)
Example 15
def launch_analysis_job(result, params, doThumbnail):
    ''' Create files and send to jobServer '''
    def create_tf_conf():
        """
        Build the contents of the report TF file (``DefaultTFs.conf``)
        """
        fname = "DefaultTFs.conf"
        tfs = models.Template.objects.filter(isofficial=True).order_by('name')
        lines = ["%s,%s,%s" % (
            tf.name,
            tf.key,
            tf.sequence,
        ) for tf in tfs]

        return (fname, "\n".join(lines))

    def create_bc_conf(barcodeId, fname):
        """
        Creates a barcodeList file for use in barcodeSplit binary.

        Danger here is if the database returns a blank, or no lines, then the
        file will be written with no entries.  The use of this empty file later
        will generate no fastq files, except for the nomatch.fastq file.

        See C source code BarCode.h for list of valid keywords
        """
        # Retrieve the list of barcodes associated with the given barcodeId
        db_barcodes = models.dnaBarcode.objects.filter(
            name=barcodeId).order_by("index")
        lines = []
        for db_barcode in db_barcodes:
            lines.append(
                'barcode %d,%s,%s,%s,%s,%s,%d,%s' %
                (db_barcode.index, db_barcode.id_str, db_barcode.sequence,
                 db_barcode.adapter, db_barcode.annotation, db_barcode.type,
                 db_barcode.length, db_barcode.floworder))
        if db_barcodes:
            lines.insert(0, "file_id %s" % db_barcodes[0].name)
            lines.insert(1, "score_mode %s" % str(db_barcodes[0].score_mode))
            lines.insert(2,
                         "score_cutoff %s" % str(db_barcodes[0].score_cutoff))
        return (fname, "\n".join(lines))

    def create_pk_conf(pk):
        """
        Build the contents of the report primary key file (``primary.key``).
        """
        text = "ResultsPK = %d" % pk
        return ("primary.key", text)

    def create_meta(experiment, result):
        """Build the contents of a report metadata file (``expMeta.dat``)."""
        def get_chipcheck_status(exp):
            """
            Load the explog stored in the log field in the experiment
            table into a python dict.  Check if `calibratepassed` is set
            """
            data = exp.log
            if data.get('calibratepassed', 'Not Found'):
                return 'Passed'
            else:
                return 'Failed'

        lines = (
            "Run Name = %s" % experiment.expName,
            "Run Date = %s" % experiment.date,
            "Run Flows = %s" % experiment.flows,
            "Project = %s" % ','.join(p.name for p in result.projects.all()),
            "Sample = %s" % experiment.get_sample(),
            "Library = N/A",
            "Reference = %s" % result.eas.reference,
            "Instrument = %s" % experiment.pgmName,
            "Flow Order = %s" %
            (experiment.flowsInOrder.strip()
             if experiment.flowsInOrder.strip() != '0' else 'TACG'),
            "Library Key = %s" % result.eas.libraryKey,
            "TF Key = %s" % result.eas.tfKey,
            "Chip Check = %s" % get_chipcheck_status(experiment),
            "Chip Type = %s" % experiment.chipType,
            "Chip Data = %s" % experiment.rawdatastyle,
            "Notes = %s" % experiment.notes,
            "Barcode Set = %s" % result.eas.barcodeKitName,
            "Analysis Name = %s" % result.resultsName,
            "Analysis Date = %s" % date.today(),
            "Analysis Flows = %s" % result.processedflows,
            "runID = %s" % result.runid,
        )

        return ('expMeta.dat', '\n'.join(lines))

    # Default control script definition
    scriptname = 'TLScript.py'

    from distutils.sysconfig import get_python_lib
    python_lib_path = get_python_lib()
    scriptpath = os.path.join(python_lib_path, 'ion/reports', scriptname)
    try:
        with open(scriptpath, "r") as f:
            script = f.read()
    except Exception as error:
        raise Exception("Error reading %s\n%s" % (scriptpath, error.args))

    # test job server connection
    webRootPath = result.get_report_path()
    try:
        host = "127.0.0.1"
        conn = client.connect(host, settings.JOBSERVER_PORT)
        to_check = os.path.dirname(webRootPath)
    except (socket.error, xmlrpclib.Fault):
        raise Exception("Failed to contact job server.")

    # the following files will be written into result's directory
    files = []
    files.append(create_tf_conf())  # DefaultTFs.conf
    files.append(create_meta(result.experiment, result))  # expMeta.dat
    files.append(create_pk_conf(result.pk))  # primary.key
    # barcodeList.txt
    barcodeKitName = result.eas.barcodeKitName
    if barcodeKitName:
        files.append(create_bc_conf(barcodeKitName, "barcodeList.txt"))

    try:
        chips = models.Chip.objects.all()
        chip_dict = dict(
            (c.name, '-pe ion_pe %s' % str(c.slots)) for c in chips)
    except:
        chip_dict = {}  # just in case we can't read from the db

    try:
        ts_job_type = 'thumbnail' if doThumbnail else ''
        conn.startanalysis(result.resultsName, script, params, files,
                           webRootPath, result.pk, result.experiment.chipType,
                           chip_dict, ts_job_type)
    except (socket.error, xmlrpclib.Fault):
        raise Exception("Failed to contact job server.")
Example 16
def _combine_results_sendto_project(project_pk, json_data, username=''):
    project = Project.objects.get(id=project_pk)
    projectName = project.name

    name = json_data['name']
    mark_duplicates = json_data['mark_duplicates']
    ids_to_merge = json_data['selected_pks']

    # check reference and flow order the same in all selected results
    for pk in ids_to_merge:
        result = Results.objects.get(pk=pk)
        if pk == ids_to_merge[0]:
            reference = result.reference
            floworder = result.experiment.flowsInOrder
            barcodeId = result.eas.barcodeKitName
        else:
            if not reference == result.reference:
                raise Exception("Selected results do not have the same Alignment Reference.")
            if not floworder == result.experiment.flowsInOrder:
                floworder = ''
            if not barcodeId == result.eas.barcodeKitName:
                barcodeId = ''

    # create new entry in DB for combined Result
    delim = ':'
    filePrefix = "CombineAlignments"  # this would normally be Experiment name that's prefixed to all filenames
    result, exp = create_combined_result('CA_%s_%s' % (name, projectName))
    result.projects.add(project)
    result.resultsType = 'CombinedAlignments'
    result.parentIDs = delim + delim.join(ids_to_merge) + delim
    result.reference = reference
    result.sffLink = path.join(result.reportLink, "%s_%s.sff" % (filePrefix, result.resultsName))

    # add ExperimentAnalysisSettings
    eas_kwargs = {
            'date' : datetime.now(),
            'experiment' : exp,
            'isEditable' : False,
            'isOneTimeOverride' : True,
            'status' : 'run',
            'reference': reference,
            'barcodeKitName': barcodeId,
            'targetRegionBedFile': '',
            'hotSpotRegionBedFile': ''
    }
    eas = ExperimentAnalysisSettings(**eas_kwargs)
    eas.save()    
    result.eas = eas
    
    result.save()

    # gather parameters to pass to merging script
    links = []
    bams = []
    names = []
    plan = {}
    parents = Results.objects.filter(id__in=ids_to_merge).order_by('-timeStamp')
    for parent in parents:
        links.append(parent.reportLink)
        names.append(parent.resultsName)
        bamFile = 'rawlib.bam'
        bams.append(path.join(parent.get_report_dir(), bamFile))

        #need Plan info for VariantCaller etc. plugins: but which plan to use??
        try:
            planObj = [parent.experiment.plan]
            plan_json = serializers.serialize("json", planObj)
            plan_json = json.loads(plan_json)
            plan = plan_json[0]["fields"]
        except:
            pass

    try:
        genome = ReferenceGenome.objects.all().filter(short_name=reference, index_version=settings.TMAP_VERSION, enabled=True)[0]
        if path.exists(genome.info_text()):
            genomeinfo = genome.info_text()
        else:
            genomeinfo = ""
    except:
        genomeinfo = ""

    params = {
        'resultsName': result.resultsName,
        'parentIDs': ids_to_merge,
        'parentNames': names,
        'parentLinks': links,
        'parentBAMs': bams,
        'libraryName': result.reference,
        'tmap_version': settings.TMAP_VERSION,
        'mark_duplicates': mark_duplicates,
        'plan': plan,
        'run_name': filePrefix,
        'genomeinfo': genomeinfo,
        'flowOrder': floworder,
        'project': projectName,
        'barcodeId': barcodeId,
        'warnings': validate_results_to_combine(parents)
    }

    scriptpath = '/usr/lib/python2.6/dist-packages/ion/reports/combineReports.py'

    try:
        with open(scriptpath, "r") as f:
            script = f.read()
    except Exception as error:
        result.status = "Error reading %s\n%s" % (scriptpath, error.args)
        raise Exception(result.status)

    files = []
    # do we need expMeta?
    lines = ("Project = %s" % ','.join(p.name for p in result.projects.all()),
             "Library = %s" % result.reference,
             "Analysis Name = %s" % result.resultsName,
             "Flow Order = %s" % floworder,
             "Run Name = %s" % filePrefix
             )
    files.append(('expMeta.dat', '\n'.join(lines)))
    files.append(("primary.key", "ResultsPK = %s" % result.pk))

    webRootPath = result.web_root_path(_location())

    try:
        host = "127.0.0.1"
        conn = client.connect(host, settings.JOBSERVER_PORT)
        conn.startanalysis(result.resultsName, script, params, files, webRootPath, result.pk, '', {}, 'combineAlignments')
    except:
        result.status = "Failed to contact job server."
        raise Exception(result.status)

    # log project history
    message = 'Combine results %s into name= %s (%s), auto-assign to project name= %s (%s).' % (ids_to_merge, result.resultsName, result.pk, projectName, project_pk)
    EventLog.objects.add_entry(project, message, username)

    return result
Example 17
def new_genome(request):
    """This is the page to create a new genome. The XML-RPC server is ionJobServer.
    """

    if request.method == "POST":
        # parse the data sent in

        #required
        name = request.POST.get('name', False)
        short_name = request.POST.get('short_name', False)
        fasta = request.POST.get('target_file', False)
        version = request.POST.get('version', False)
        notes = request.POST.get('notes', "")

        #optional
        read_sample_size = request.POST.get('read_sample_size', False)
        read_exclude_length = request.POST.get('read_exclude_length', False)

        #URL download
        url = request.POST.get('url', False)

        error_status = ""
        reference_path = REFERENCE_LIBRARY_TEMP_DIR + fasta

        why_delete = ""

        #if any of those were false send back a failed message
        if not all((name, short_name, fasta, version)):
            return render_to_json({"status": "Form validation failed", "error": True})

        if not set(short_name).issubset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"):
            return render_to_json({"status": "The short name has invalid characters. The valid values are letters, numbers, and underscores.", "error": True})

        #TODO: check to make sure the zip file only has one fasta or fa
        if not url:
            # check to ensure the size on the OS is the same as reported.
            reported_file_size = request.POST.get('reported_file_size', False)

            try:
                uploaded_file_size = str(os.path.getsize(reference_path))
            except OSError:
                return render_to_json({"status": "The FASTA temporary files was not found", "error": True})

            if reported_file_size != uploaded_file_size:
                why_delete = "The file you uploaded differs from the expected size. This is due to an error uploading."

            if not (fasta.lower().endswith(".fasta") or fasta.lower().endswith(".zip")):
                why_delete = "The file you uploaded does not have a .fasta or .zip extension.  It must be a plain text fasta file or a Zip compressed fasta."

            if why_delete:
                try:
                    os.remove(reference_path)
                except OSError:
                    why_delete += " The FASTA file could not be deleted."
                return render_to_json({"status": why_delete, "error": True})

        # Make a genome ref object
        if ReferenceGenome.objects.filter(short_name=short_name, index_version=settings.TMAP_VERSION):
            #check to see if the genome already exists in the database with the same version
            return render_to_json({"status": "Failed - Genome with this short name and index version already exist.", "error": True})
        ref_genome = ReferenceGenome()
        ref_genome.name = name
        ref_genome.short_name = short_name
        ref_genome.version = version
        ref_genome.date = datetime.datetime.now()
        ref_genome.notes = notes
        ref_genome.status = "queued"
        ref_genome.enabled = False
        ref_genome.index_version = settings.TMAP_VERSION

        #before the object is saved we should ping the xml-rpc server to see if it is alive.
        try:
            conn = client.connect(JOBSERVER_HOST, settings.JOBSERVER_PORT)
            #just check uptime to make sure the call does not fail
            conn.uptime()
            logger.debug('Connected to ionJobserver process.')
        except (socket.error, xmlrpclib.Fault):
            return render_to_json({"status": "Unable to connect to ionJobserver process.  You may need to restart ionJobserver", "error": True})

        #if the above didn't fail then we can save the object
        #this object must be saved before the tmap call is made
        ref_genome.save()
        logger.debug('Saved ReferenceGenome %s' % ref_genome.__dict__)

        #kick off the anaserve tmap xmlrpc call
        import traceback
        try:
            conn = client.connect(JOBSERVER_HOST, settings.JOBSERVER_PORT)
            tmap_bool, tmap_status = conn.tmap(str(ref_genome.id), fasta, short_name, name, version,
                                               read_sample_size, read_exclude_length, settings.TMAP_VERSION)
            logger.debug('ionJobserver process reported %s %s' % (tmap_bool, tmap_status))
        except (socket.error, xmlrpclib.Fault):
            # delete the genome object, because it was not successful
            ref_genome.delete()
            return render_to_json({"status": "Error with index creation", "error": traceback.format_exc()})

        if not tmap_bool:
            ref_genome.delete()
            return render_to_json({"status": tmap_status, "error": True})

        return render_to_json({"status": "The genome index is being created.  This might take a while, check the status on the references tab. \
                                You are being redirected there now.", "error": False})

    elif request.method == "GET":
        ctx = RequestContext(request, {})
        return render_to_response("rundb/configure/modal_references_new_genome.html", context_instance=ctx)
Example 18
File: views.py Project: aidjek/TS
def _combine_results_sendto_project(project_pk, json_data, username=''):
    project = Project.objects.get(id=project_pk)
    projectName = project.name

    name = json_data['name']
    mark_duplicates = json_data['mark_duplicates']
    ids_to_merge = json_data['selected_pks']

    # check reference and flow order the same in all selected results
    for pk in ids_to_merge:
        result = Results.objects.get(pk=pk)
        if pk == ids_to_merge[0]:
            reference = result.reference
            floworder = result.experiment.flowsInOrder
            barcodeId = result.eas.barcodeKitName
        else:
            if not reference == result.reference:
                raise Exception(
                    "Selected results do not have the same Alignment Reference."
                )
            if not floworder == result.experiment.flowsInOrder:
                floworder = ''
            if not barcodeId == result.eas.barcodeKitName:
                barcodeId = ''

    # create new entry in DB for combined Result
    delim = ':'
    filePrefix = "CombineAlignments"  # this would normally be Experiment name that's prefixed to all filenames
    result, exp = create_combined_result('CA_%s_%s' % (name, projectName))
    result.projects.add(project)
    result.resultsType = 'CombinedAlignments'
    result.parentIDs = delim + delim.join(ids_to_merge) + delim
    result.reference = reference
    result.sffLink = path.join(result.reportLink,
                               "%s_%s.sff" % (filePrefix, result.resultsName))

    # add ExperimentAnalysisSettings
    eas_kwargs = {
        'date': datetime.now(),
        'experiment': exp,
        'isEditable': False,
        'isOneTimeOverride': True,
        'status': 'run',
        'reference': reference,
        'barcodeKitName': barcodeId,
        'targetRegionBedFile': '',
        'hotSpotRegionBedFile': ''
    }
    eas = ExperimentAnalysisSettings(**eas_kwargs)
    eas.save()
    result.eas = eas

    result.save()

    # gather parameters to pass to merging script
    links = []
    bams = []
    names = []
    plan = {}
    parents = Results.objects.filter(
        id__in=ids_to_merge).order_by('-timeStamp')
    for parent in parents:
        links.append(parent.reportLink)
        names.append(parent.resultsName)
        bamFile = 'rawlib.bam'
        bams.append(path.join(parent.get_report_dir(), bamFile))

        #need Plan info for VariantCaller etc. plugins: but which plan to use??
        try:
            planObj = [parent.experiment.plan]
            plan_json = serializers.serialize("json", planObj)
            plan_json = json.loads(plan_json)
            plan = plan_json[0]["fields"]
        except:
            pass

    try:
        genome = ReferenceGenome.objects.all().filter(
            short_name=reference,
            index_version=settings.TMAP_VERSION,
            enabled=True)[0]
        if path.exists(genome.info_text()):
            genomeinfo = genome.info_text()
        else:
            genomeinfo = ""
    except:
        genomeinfo = ""

    params = {
        'resultsName': result.resultsName,
        'parentIDs': ids_to_merge,
        'parentNames': names,
        'parentLinks': links,
        'parentBAMs': bams,
        'libraryName': result.reference,
        'tmap_version': settings.TMAP_VERSION,
        'mark_duplicates': mark_duplicates,
        'plan': plan,
        'run_name': filePrefix,
        'genomeinfo': genomeinfo,
        'flowOrder': floworder,
        'project': projectName,
        'barcodeId': barcodeId,
        'warnings': validate_results_to_combine(parents)
    }

    scriptpath = '/usr/lib/python2.6/dist-packages/ion/reports/combineReports.py'

    try:
        with open(scriptpath, "r") as f:
            script = f.read()
    except Exception as error:
        result.status = "Error reading %s\n%s" % (scriptpath, error.args)
        raise Exception(result.status)

    files = []
    # do we need expMeta?
    lines = ("Project = %s" % ','.join(p.name for p in result.projects.all()),
             "Library = %s" % result.reference,
             "Analysis Name = %s" % result.resultsName,
             "Flow Order = %s" % floworder, "Run Name = %s" % filePrefix)
    files.append(('expMeta.dat', '\n'.join(lines)))
    files.append(("primary.key", "ResultsPK = %s" % result.pk))

    webRootPath = result.web_root_path(_location())

    try:
        host = "127.0.0.1"
        conn = client.connect(host, settings.JOBSERVER_PORT)
        conn.startanalysis(result.resultsName, script, params, files,
                           webRootPath, result.pk, '', {}, 'combineAlignments')
    except:
        result.status = "Failed to contact job server."
        raise Exception(result.status)

    # log project history
    message = 'Combine results %s into name= %s (%s), auto-assign to project name= %s (%s).' % (
        ids_to_merge, result.resultsName, result.pk, projectName, project_pk)
    EventLog.objects.add_entry(project, message, username)

    return result