Esempio n. 1
0
def publisher_upload(request, pub_name):
    """Display the publisher's upload.html template on a GET of the page.

    On POST, hand the uploaded file and its metadata to the publisher via
    ``edit_upload`` and return a JSON status/id (or a JSON error).
    """
    pub = models.Publisher.objects.get(name=pub_name)

    if request.method == 'POST':
        meta = request.POST.dict()
        # Materialize as a list: FILES.values() is a non-indexable view on
        # Python 3 / newer Django, so files[0] would raise TypeError.
        files = list(request.FILES.values())

        if not files:
            return render_to_json({"error": "Error: No file selected for upload"})
        try:
            # Renamed from 'async': that is a reserved word in Python 3.7+.
            upload, async_result = edit_upload(pub, files[0], json.dumps(meta))
            return render_to_json({"status": upload.status, "id": upload.id})
        except Exception as e:
            return render_to_json({"error": str(e)})
    else:
        action = request.get_full_path()
        error = ''
        contents = ''
        # Build the path outside the try so the error message below can
        # always reference it.
        path = os.path.join(pub.path, 'upload.html')
        try:
            with open(path, 'r') as f:
                contents = f.read()
        except IOError:
            # A missing/unreadable template is reported inline, not fatal.
            error = "Error: Unable to read %s" % path

        ctx = RequestContext(request, {"contents": contents, "action": action, "error": error})
        return render_to_response('rundb/configure/modal_publisher_upload.html', context_instance=ctx)
Esempio n. 2
0
def publisher_upload(request, pub_name):
    """Display the publisher's upload.html template on a GET of the page.

    On POST, stamp the requesting username into the metadata and hand the
    uploaded file to the publisher via ``edit_upload``; the response is a
    JSON status/id (or a JSON error).
    """
    pub = models.Publisher.objects.get(name=pub_name)

    if request.method == 'POST':
        meta = request.POST.dict()
        # Materialize as a list: FILES.values() is a non-indexable view on
        # Python 3 / newer Django, so files[0] would raise TypeError.
        files = list(request.FILES.values())

        if not files:
            return render_to_json({"error": "Error: No file selected for upload"})
        try:
            # Record who performed the upload before serializing the metadata.
            meta['username'] = request.user.username
            # Renamed from 'async': that is a reserved word in Python 3.7+.
            upload, async_result = edit_upload(pub, files[0], json.dumps(meta))
            return render_to_json({"status": upload.status, "id": upload.id})
        except Exception as e:
            return render_to_json({"error": str(e)})
    else:
        action = request.get_full_path()
        error = ''
        contents = ''
        # Build the path outside the try so the error message below can
        # always reference it.
        path = os.path.join(pub.path, 'upload.html')
        try:
            with open(path, 'r') as f:
                contents = f.read()
        except IOError:
            # A missing/unreadable template is reported inline, not fatal.
            error = "Error: Unable to read %s" % path

        ctx = RequestContext(request, {"contents": contents, "action": action, "error": error})
        return render_to_response('rundb/configure/modal_publisher_upload.html', context_instance=ctx)
Esempio n. 3
0
def genome_status(request, reference_id):
    """Provide a way for the index creator to let us know when the index has been created.

    POST updates the ReferenceGenome record's status fields; GET returns the
    current status as JSON.
    """
    if request.method == "POST":
        rg = get_object_or_404(ReferenceGenome, pk=reference_id)
        status = request.POST.get('status', False)
        # NOTE(review): POST values are strings, so a posted "false" is a
        # *truthy* value here — confirm the index creator posts "" / "1"
        # before tightening this to a real boolean parse.
        enabled = request.POST.get('enabled', False)
        verbose_error = request.POST.get('verbose_error', "")
        index_version = request.POST.get('index_version', "")

        if not status:
            return render_to_json({"status": "error genome status not given"})

        rg.status = status
        rg.enabled = enabled
        rg.verbose_error = verbose_error
        rg.index_version = index_version
        rg.reference_path = os.path.join(settings.TMAP_DIR, rg.short_name)

        rg.save()
        return render_to_json({
            "status": "genome status updated",
            "enabled": enabled
        })
    if request.method == "GET":
        rg = get_object_or_404(ReferenceGenome, pk=reference_id)
        return render_to_json({"status": rg.status})

    # Previously other HTTP methods fell through and returned None, which
    # Django turns into a server error; answer them explicitly instead.
    return render_to_json({"status": "method not supported"})
Esempio n. 4
0
def add_custom_genome(request):
    """Import a custom genome via file upload or URL.

    POST with 'target_file' imports a previously-uploaded temp file (after a
    size sanity check); POST with 'reference_url' imports from a URL.  GET
    renders the import form.
    """
    if request.method == "POST":
        url = request.POST.get("reference_url", None)
        target_file = request.POST.get('target_file', False)
        reference_args = {
            "short_name": request.POST.get("short_name"),
            "name": request.POST.get("name"),
            "version": request.POST.get("version", ""),
            "notes": request.POST.get("notes", ""),
            "index_version": ""
        }

        if target_file:
            # import via file upload
            reference_path = os.path.join(settings.TEMP_PATH, target_file)
            reference_args["source"] = reference_path

            # Verify the upload completed by comparing the on-disk size with
            # the size the client reported.
            failed = False
            reported_file_size = request.POST.get('reported_file_size', False)
            try:
                uploaded_file_size = str(os.path.getsize(reference_path))
                if reported_file_size and (reported_file_size !=
                                           uploaded_file_size):
                    failed = "Upload error: uploaded file size is incorrect"
            except OSError:
                failed = "Upload error: temporary file not found"

            # Only attempt the import when the size check passed; previously
            # a truncated/missing upload was still handed to
            # new_reference_genome (which could mask the size error).
            if not failed:
                try:
                    new_reference_genome(reference_args, None, target_file)
                except Exception as e:
                    failed = str(e)

            if failed:
                # Clean up the bad temp file before reporting the failure.
                try:
                    os.remove(reference_path)
                except OSError:
                    failed += " The FASTA file could not be deleted."

                logger.error("Failed uploading genome file: " + failed)
                return render_to_json({"status": failed, "error": True})
            return render_to_json({"error": False})
        else:
            # import via URL
            reference_args["source"] = url
            try:
                new_reference_genome(reference_args, url, None)
            except Exception as e:
                return render_to_json({"status": str(e), "error": True})
            return HttpResponseRedirect(
                urlresolvers.reverse("configure_references"))

    elif request.method == "GET":
        return render_to_response(
            "rundb/configure/modal_references_new_genome.html",
            context_instance=RequestContext(request, {}))
Esempio n. 5
0
def write_plupload(request, pub_name):
    """Receive one chunk of a plupload file transfer for a publisher.

    Chunks are appended to a file under the temp upload directory; when the
    last chunk arrives the assembled file is handed to the publisher via
    ``move_upload`` and the publisher scripts are queued asynchronously via
    ``run_pub_scripts.delay``.  The JSON response echoes the chunk
    bookkeeping and, on completion, the ContentUpload id (None if the
    hand-off failed).
    """
    logger.info("Starting write plupload")

    pub = models.Publisher.objects.get(name=pub_name)
    # 'meta' arrives as a JSON string; it is only parsed once the final
    # chunk has been written.
    meta = request.POST.get('meta','{}')
    logger.debug("%s" % meta)

    if request.method == 'POST':
        name = request.REQUEST.get('name', '')
        uploaded_file = request.FILES['file']
        if not name:
            # Fall back to the browser-supplied filename.
            name = uploaded_file.name
        logger.debug("plupload name = '%s'" % name)

        try:
            validate_plupload(request, pub_name, name)
        except Exception as err:
            return HttpResponseBadRequest(str(err))

        upload_dir = "/results/referenceLibrary/temp"
        if not os.path.exists(upload_dir):
            return render_to_json({"error": "upload path does not exist"})

        dest_path = os.path.join(upload_dir, name)

        logger.debug("plupload destination = '%s'" % dest_path)

        # chunk/chunks are request *strings*; both default to '0' when the
        # client sends the file unchunked.
        chunk = request.REQUEST.get('chunk', '0')
        chunks = request.REQUEST.get('chunks', '0')

        logger.debug("plupload chunk %s %s of %s" % (str(type(chunk)), str(chunk), str(chunks)))

        debug = [chunk, chunks]

        # First chunk truncates the destination ('wb'); later chunks append.
        with open(dest_path, ('wb' if chunk == '0' else 'ab')) as f:
            for content in uploaded_file.chunks():
                logger.debug("content chunk = '%d'" % len(content))
                f.write(content)

        my_contentupload_id = None
        if int(chunk) + 1 >= int(chunks):
            # Last chunk: parse the metadata, stamp the uploading user, and
            # hand the assembled file to the publisher pipeline.
            try:
                meta = json.loads(meta)
                meta['username'] = request.user.username
                upload = move_upload(pub, dest_path, name, json.dumps(meta))
                async_upload = run_pub_scripts.delay(pub, upload)
                my_contentupload_id = upload.id
            except Exception as err:
                # Best-effort: the failure is logged but the response still
                # reports contentupload_id=None rather than an HTTP error.
                logger.exception("There was a problem during upload of a file for a publisher.")
            else:
                logger.info("Successfully pluploaded %s" % name)

        logger.debug("plupload done")
        return render_to_json({"chunk posted": debug, "contentupload_id": my_contentupload_id})

    else:
        return render_to_json({"method": "only post here"})
Esempio n. 6
0
def write_plupload(request, pub_name):
    """Receive one chunk of a plupload file transfer for a publisher.

    Chunks are appended to a file under the temp upload directory; when the
    last chunk arrives the assembled file is handed to the publisher via
    ``move_upload`` (with the raw request 'meta' string forwarded untouched)
    and the publisher scripts are queued asynchronously via
    ``run_pub_scripts.delay``.
    """
    logger.info("Starting write plupload")

    pub = models.Publisher.objects.get(name=pub_name)
    logger.debug("%s %s" % (str(type(request.REQUEST['meta'])), request.REQUEST['meta']))

    logger.debug("Publisher Plupload started")
    if request.method == 'POST':
        name = request.REQUEST.get('name', '')
        uploaded_file = request.FILES['file']
        if not name:
            # Fall back to the browser-supplied filename.
            name = uploaded_file.name
        logger.debug("plupload name = '%s'" % name)

        #check to see if a user has uploaded a file before, and if they have
        #not, make them a upload directory

        upload_dir = "/results/referenceLibrary/temp"

        if not os.path.exists(upload_dir):
            return render_to_json({"error": "upload path does not exist"})

        dest_path = os.path.join(upload_dir, name)

        logger.debug("plupload destination = '%s'" % dest_path)

        # chunk/chunks are request *strings*; both default to '0' when the
        # client sends the file unchunked.
        chunk = request.REQUEST.get('chunk', '0')
        chunks = request.REQUEST.get('chunks', '0')

        logger.debug("plupload chunk %s %s of %s" % (str(type(chunk)), str(chunk), str(chunks)))

        debug = [chunk, chunks]

        # First chunk truncates the destination ('wb'); later chunks append.
        with open(dest_path, ('wb' if chunk == '0' else 'ab')) as f:
            for content in uploaded_file.chunks():
                logger.debug("content chunk = '%d'" % len(content))
                f.write(content)

        my_contentupload_id = None
        if int(chunk) + 1 >= int(chunks):
            # Last chunk: hand the assembled file to the publisher pipeline.
            try:
                upload = move_upload(pub, dest_path, name, request.REQUEST['meta'])
                async_upload = run_pub_scripts.delay(pub, upload)
                my_contentupload_id = upload.id
            except Exception as err:
                # Best-effort: the failure is logged but the response still
                # reports contentupload_id=None rather than an HTTP error.
                logger.exception("There was a problem during upload of a file for a publisher.")
            else:
                logger.info("Successfully pluploaded %s" % name)

        logger.debug("plupload done")
        return render_to_json({"chunk posted": debug, "contentupload_id": my_contentupload_id})

    else:
        return render_to_json({"method": "only post here"})
Esempio n. 7
0
def add_custom_genome(request):
    """Import a custom genome via file upload or URL.

    POST with 'target_file' imports a previously-uploaded temp file (after a
    size sanity check); POST with 'reference_url' imports from a URL.  GET
    renders the import form.
    """
    if request.method == "POST":
        url = request.POST.get("reference_url", None)
        target_file = request.POST.get('target_file', False)
        reference_args = {
            "short_name": request.POST.get("short_name"),
            "name": request.POST.get("name"),
            "version": request.POST.get("version", ""),
            "notes": request.POST.get("notes", ""),
            "index_version": ""
        }

        if target_file:
            # import via file upload
            reference_path = os.path.join(settings.TEMP_PATH, target_file)
            reference_args["source"] = reference_path

            # Verify the upload completed by comparing the on-disk size with
            # the size the client reported.
            failed = False
            reported_file_size = request.POST.get('reported_file_size', False)
            try:
                uploaded_file_size = str(os.path.getsize(reference_path))
                if reported_file_size and (reported_file_size != uploaded_file_size):
                    failed = "Upload error: uploaded file size is incorrect"
            except OSError:
                failed = "Upload error: temporary file not found"

            # Only attempt the import when the size check passed; previously
            # a truncated/missing upload was still handed to
            # new_reference_genome (which could mask the size error).
            if not failed:
                try:
                    new_reference_genome(reference_args, None, target_file)
                except Exception as e:
                    failed = str(e)

            if failed:
                # Clean up the bad temp file before reporting the failure.
                try:
                    os.remove(reference_path)
                except OSError:
                    failed += " The FASTA file could not be deleted."

                logger.error("Failed uploading genome file: " + failed)
                return render_to_json({"status": failed, "error": True})
            return render_to_json({"error": False})
        else:
            # import via URL
            reference_args["source"] = url
            try:
                new_reference_genome(reference_args, url, None)
            except Exception as e:
                return render_to_json({"status": str(e), "error": True})
            return HttpResponseRedirect(urlresolvers.reverse("configure_references"))

    elif request.method == "GET":
        return render_to_response("rundb/configure/modal_references_new_genome.html", context_instance=RequestContext(request, {}))
Esempio n. 8
0
File: util.py Progetto: tw7649116/TS
def readTimezone(request):
    """Return the host's timezone (the contents of /etc/timezone) as JSON."""
    with open('/etc/timezone', 'rb') as fh:
        # The context manager closes the file; the explicit fh.close() the
        # original carried inside the with-block was redundant.
        timezone = fh.read()
    return render_to_json({"timezone": timezone.strip()})
Esempio n. 9
0
def download_genome(request):
    """Import a preloaded Ion reference (and, optionally, its annotation).

    Called by "Import Preloaded Ion References".  POST starts the download;
    GET renders the download/progress page.
    """
    if request.method == "POST":
        reference_meta = request.POST.get("reference_meta", None)
        ref_annot_update = request.POST.get("ref_annot_update", None)
        # reference_meta is a base64-encoded JSON blob of reference args.
        reference_args = json.loads(base64.b64decode(reference_meta))
        annotation_meta = request.POST.get("missingAnnotation_meta", None)
        reference_mask_info = request.POST.get("reference_mask", None)

        # Initialize both names so the checks below cannot hit a NameError:
        # previously they were only bound inside the 'if annotation_meta:'
        # branch, crashing any POST without annotation metadata.
        annotation_data = None
        annotation_lists = None
        if annotation_meta:
            annotation_data = json.loads(base64.b64decode(annotation_meta))
            annotation_lists = annotation_data

        # Download and register only the Ref Annotation file if Reference Genome is already Imported
        # If not, download refAnnot file and Reference Genome asynchronously
        if annotation_data and ref_annot_update:
            logger.debug(
                "Downloading Annotation File {0} with meta {1}".format(
                    annotation_data, reference_meta))
            if annotation_lists:
                download_genome_annotation(annotation_lists, reference_args)
        else:
            url = request.POST.get("reference_url", None)
            logger.debug("Downloading {0} with meta {1}".format(
                url, reference_meta))
            if url is not None:
                if annotation_lists:
                    download_genome_annotation(annotation_lists,
                                               reference_args)

                try:
                    new_reference_genome(
                        reference_args,
                        url,
                        reference_mask_filename=reference_mask_info)
                except Exception as e:
                    return render_to_json({"status": str(e), "error": True})

        return HttpResponseRedirect(
            urlresolvers.reverse("references_genome_download"))

    elif request.method == "GET":
        references = get_references() or []
        downloads = FileMonitor.objects.filter(
            tags__contains="reference").order_by('-created')
        downloads_annot = FileMonitor.objects.filter(
            tags__contains="reference_annotation").order_by('-created')

        # Merge annotation download state into the reference listing.
        (references, downloads_annot) = get_annotation(references,
                                                       downloads_annot)

        ctx = {
            'downloads': downloads,
            'downloads_annot': downloads_annot,
            'references': references
        }
        return render_to_response("rundb/configure/reference_download.html",
                                  ctx,
                                  context_instance=RequestContext(request))
Esempio n. 10
0
def download_genome(request):
    """Import a preloaded Ion reference; GET renders the download page.

    Called by "Import Preloaded Ion References".  The GET branch also
    annotates each installed reference with its available (not yet
    installed) BED and annotation files and decides whether the page should
    auto-refresh.
    """
    if request.method == "POST":
        reference_meta = request.POST.get("reference_meta", None)
        # reference_meta is a base64-encoded JSON blob of reference args.
        reference_args = json.loads(base64.b64decode(reference_meta))
        reference_mask_info = request.POST.get("reference_mask", None)

        url = request.POST.get("reference_url", None)
        logger.debug("Downloading {0} with meta {1}".format(url, reference_meta))
        if url is not None:
            try:
                new_reference_genome(reference_args, url, reference_mask_filename=reference_mask_info)
            except Exception as e:
                return render_to_json({"status": str(e), "error": True})

        return HttpResponseRedirect(urlresolvers.reverse("references_genome_download"))

    elif request.method == "GET":
        references = get_references() or []
        downloads = FileMonitor.objects.filter(tags__in=["reference","bedfile","annotation"]).order_by('-created')

        # update BED files available for pre-loaded references
        for ref in references:
            if ref['installed'] or ref.get('preInstalled'):
                bedfiles = []
                for bedfile in ref.get('bedfiles',[]):
                    status = get_bedfile_status(bedfile, ref['meta']['short_name'])
                    if status != "_installed":
                        bedfile['status'] = status
                        bedfiles.append(bedfile)

                ref['bedfiles'] = bedfiles

                # Likewise collect not-yet-installed annotation files.
                # (Leftover commented-out pdb debugging removed.)
                annotationfiles = []
                for annotationfile in ref.get('annotation', []):
                    status = get_annotationfile_status(annotationfile, ref['meta']['short_name'])
                    if status != "_installed":
                        annotationfile['status'] = status
                        annotationfiles.append(annotationfile)

                ref['annotationfiles'] = annotationfiles

        # set up page to refresh while anything is downloading/processing
        _in_progress_status = ["Queued", "Starting", "Downloading", "Preprocessing", "Indexing"]
        _in_progress_status += [s.lower() for s in _in_progress_status]
        downloading = downloads.filter(status__in=_in_progress_status)
        id_hashes = [r['meta']['identity_hash'] for r in references if r['meta']['identity_hash']]
        processing = ReferenceGenome.objects.filter(identity_hash__in=id_hashes, status__in=_in_progress_status)

        ctx = {
            'downloads': downloads,
            'references': references,
            'refresh_progress': downloading.count() > 0 or processing.count() > 0
        }
        return render_to_response("rundb/configure/reference_download.html", ctx, context_instance=RequestContext(request))
Esempio n. 11
0
def delete_genome(request, pk):
    """Delete a reference genome via POST and report the outcome as JSON.

    The filesystem deletions are performed by the model's delete() method;
    this view only interprets its boolean result.
    """
    if request.method == "POST":
        ref_genome = get_object_or_404(ReferenceGenome, pk=pk)

        # The model removes the on-disk directory and reports success.
        deleted_ok = ref_genome.delete()

        if deleted_ok:
            return render_to_json({"status": "Genome was deleted successfully"})

        # Deletion failed: tell the user where to check permissions.
        return render_to_json({"status": " <strong>Error</strong> <p>Genome could not be deleted.</p> \
                                          <p>Check the file permissions for the genome on the file system at: </p> \
                                          <p><strong>" + str(ref_genome.reference_path) + "</p></strong> "})

    if request.method == "GET":
        return render_to_json({"status": "This must be accessed via post"})
Esempio n. 12
0
def delete_genome(request, pk):
    """Delete a reference genome via POST and report the outcome as JSON.

    Filesystem cleanup is delegated to ReferenceGenome.delete(); this view
    only interprets its boolean result.
    """
    # GET is not allowed — this endpoint mutates state.
    if request.method == "GET":
        return render_to_json({"status": "This must be accessed via post"})

    if request.method == "POST":
        genome = get_object_or_404(ReferenceGenome, pk=pk)

        # The model removes the on-disk directory and reports success.
        if genome.delete():
            return render_to_json({"status": "Genome was deleted successfully"})

        # Deletion failed: point the user at the on-disk location.
        return render_to_json({"status": " <strong>Error</strong> <p>Genome could not be deleted.</p> \
                                          <p>Check the file permissions for the genome on the file system at: </p> \
                                          <p><strong>" + str(genome.reference_path) + "</p></strong> "})
Esempio n. 13
0
File: util.py Progetto: dkeren/TS
def plupload_file_upload(
    request,
    dest_dir,
    request_filename_attr='name',
    request_files_attr='file',
):
    """ A shared file upload to save an uploaded file to disk when using plupload.
        request - the HTTPRequest object
        dest_dir - the destination directory to save the file within
        request_filename_attr - POST key holding the client-side filename
        request_files_attr - FILES key holding the uploaded chunk
    """
    if request.method == 'POST':
        name = request.POST.get(request_filename_attr, '')
        uploaded_file = request.FILES[request_files_attr]
        if not name:
            # Fall back to the browser-supplied filename.
            name = uploaded_file.name
        name, ext = os.path.splitext(name)

        #check to see if a user has uploaded a file before, and if they have
        #not, make them a upload directory
        if not os.path.exists(dest_dir):
            return render_to_json({"error": "upload path does not exist"})

        dest_path = '%s%s%s%s' % (dest_dir, os.sep, name, ext)

        # chunk/chunks arrive as *strings* from the request.
        chunk = request.POST.get('chunk', '0')
        chunks = request.POST.get('chunks', '0')

        debug = [chunk, chunks]

        # BUG FIX: the original compared the string chunk to the int 0
        # ('wb' if chunk == 0 ...), which was always False, so even the
        # first chunk appended — resuming onto any stale file of the same
        # name.  Compare against the string '0' so chunk 0 truncates.
        with open(dest_path, ('wb' if chunk == '0' else 'ab')) as outfile:
            for content in uploaded_file.chunks():
                outfile.write(content)

        if int(chunk) + 1 >= int(chunks):
            #the upload has finished
            pass

        # NOTE(review): "chuck posted" is a typo for "chunk posted", but the
        # key is part of the JSON contract with the client — left unchanged.
        return render_to_json({"chuck posted": debug})

    else:
        return render_to_json({"method": "only POST here"})
Esempio n. 14
0
def plupload_file_upload(request, dest_dir, request_filename_attr='name', request_files_attr='file'):
    """ A shared file upload to save an uploaded file to disk when using plupload.
        request - the HTTPRequest object
        dest_dir - the destination directory to save the file within
        request_filename_attr - POST key holding the client-side filename
        request_files_attr - FILES key holding the uploaded chunk
    """
    if request.method == 'POST':
        name = request.POST.get(request_filename_attr, '')
        uploaded_file = request.FILES[request_files_attr]
        if not name:
            # Fall back to the browser-supplied filename.
            name = uploaded_file.name
        name, ext = os.path.splitext(name)

        #check to see if a user has uploaded a file before, and if they have
        #not, make them a upload directory
        if not os.path.exists(dest_dir):
            return render_to_json({"error": "upload path does not exist"})

        dest_path = '%s%s%s%s' % (dest_dir, os.sep, name, ext)

        # chunk/chunks arrive as *strings* from the request.
        chunk = request.POST.get('chunk', '0')
        chunks = request.POST.get('chunks', '0')

        debug = [chunk, chunks]

        # BUG FIX: the original compared the string chunk to the int 0
        # ('wb' if chunk == 0 ...), which was always False, so even the
        # first chunk appended — resuming onto any stale file of the same
        # name.  Compare against the string '0' so chunk 0 truncates.
        with open(dest_path, ('wb' if chunk == '0' else 'ab')) as outfile:
            for content in uploaded_file.chunks():
                outfile.write(content)

        if int(chunk) + 1 >= int(chunks):
            #the upload has finished
            pass

        # NOTE(review): "chuck posted" is a typo for "chunk posted", but the
        # key is part of the JSON contract with the client — left unchanged.
        return render_to_json({"chuck posted": debug})

    else:
        return render_to_json({"method": "only POST here"})
Esempio n. 15
0
def genome_status(request, reference_id):
    """Provide a way for the index creator to let us know when the index has been created.

    POST updates the ReferenceGenome record's status fields; GET returns the
    current status as JSON.
    """
    if request.method == "POST":
        rg = get_object_or_404(ReferenceGenome, pk=reference_id)
        status = request.POST.get('status', False)
        # NOTE(review): POST values are strings, so a posted "false" is a
        # *truthy* value here — confirm the index creator posts "" / "1"
        # before tightening this to a real boolean parse.
        enabled = request.POST.get('enabled', False)
        verbose_error = request.POST.get('verbose_error', "")
        index_version = request.POST.get('index_version', "")

        if not status:
            return render_to_json({"status": "error genome status not given"})

        rg.status = status
        rg.enabled = enabled
        rg.verbose_error = verbose_error
        rg.index_version = index_version
        rg.reference_path = os.path.join(settings.TMAP_DIR, rg.short_name)

        rg.save()
        return render_to_json({"status": "genome status updated", "enabled": enabled})
    if request.method == "GET":
        rg = get_object_or_404(ReferenceGenome, pk=reference_id)
        return render_to_json({"status": rg.status})

    # Previously other HTTP methods fell through and returned None, which
    # Django turns into a server error; answer them explicitly instead.
    return render_to_json({"status": "method not supported"})
Esempio n. 16
0
def download_genome(request):
    """Import a preloaded Ion reference (and, optionally, its annotation).

    Called by "Import Preloaded Ion References".  POST starts the download;
    GET renders the download/progress page.
    """
    if request.method == "POST":
        reference_meta = request.POST.get("reference_meta", None)
        ref_annot_update = request.POST.get("ref_annot_update", None)
        # reference_meta is a base64-encoded JSON blob of reference args.
        reference_args = json.loads(base64.b64decode(reference_meta))
        annotation_meta = request.POST.get("missingAnnotation_meta", None)
        reference_mask_info = request.POST.get("reference_mask", None)

        # Initialize both names so the checks below cannot hit a NameError:
        # previously they were only bound inside the 'if annotation_meta:'
        # branch, crashing any POST without annotation metadata.
        annotation_data = None
        annotation_lists = None
        if annotation_meta:
            annotation_data = json.loads(base64.b64decode(annotation_meta))
            annotation_lists = annotation_data

        # Download and register only the Ref Annotation file if Reference Genome is already Imported
        # If not, download refAnnot file and Reference Genome asynchronously
        if annotation_data and ref_annot_update:
            logger.debug("Downloading Annotation File {0} with meta {1}".format(annotation_data, reference_meta))
            if annotation_lists:
                download_genome_annotation(annotation_lists, reference_args)
        else:
            url = request.POST.get("reference_url", None)
            logger.debug("Downloading {0} with meta {1}".format(url, reference_meta))
            if url is not None:
                if annotation_lists:
                    download_genome_annotation(annotation_lists, reference_args)

                try:
                    new_reference_genome(reference_args, url, reference_mask_filename=reference_mask_info)
                except Exception as e:
                    return render_to_json({"status": str(e), "error": True})

        return HttpResponseRedirect(urlresolvers.reverse("references_genome_download"))

    elif request.method == "GET":
        references = get_references() or []
        downloads = FileMonitor.objects.filter(tags__contains="reference").order_by('-created')
        downloads_annot = FileMonitor.objects.filter(tags__contains="reference_annotation").order_by('-created')

        # Merge annotation download state into the reference listing.
        (references, downloads_annot) = get_annotation(references, downloads_annot)

        ctx = {
            'downloads': downloads,
            'downloads_annot': downloads_annot,
            'references': references
        }
        return render_to_response("rundb/configure/reference_download.html", ctx, context_instance=RequestContext(request))
Esempio n. 17
0
File: genomes.py Progetto: aidjek/TS
def new_genome(request):
    """This is the page to create a new genome. The XML-RPC server is ionJobServer.

    POST: validate the submitted form, register a ReferenceGenome record,
    then ask the ionJobServer (XML-RPC) to build the TMAP index.
    GET: render the creation form.
    """

    if request.method == "POST":
        # parse the data sent in

        #required
        name = request.POST.get('name', False)
        short_name = request.POST.get('short_name', False)
        fasta = request.POST.get('target_file', False)
        version = request.POST.get('version', False)
        notes = request.POST.get('notes', "")

        #optional
        read_exclude_length = request.POST.get('read_exclude_length', False)

        #URL download (skips the local file-size checks below)
        url = request.POST.get('url', False)

        error_status = ""
        why_delete = ""

        #if any of those were false send back a failed message
        # Validation must run BEFORE building reference_path: the original
        # concatenated REFERENCE_LIBRARY_TEMP_DIR + fasta first, so a
        # missing 'target_file' raised TypeError (str + False) instead of
        # returning this error response.
        if not all((name, short_name, fasta, version)):
            return render_to_json({
                "status": "Form validation failed",
                "error": True
            })

        if not set(short_name).issubset(
                "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
        ):
            return render_to_json({
                "status":
                "The short name has invalid characters. The valid values are letters, numbers, and underscores.",
                "error": True
            })

        # temp location where the browser upload was stored
        reference_path = REFERENCE_LIBRARY_TEMP_DIR + fasta

        #TODO: check to make sure the zip file only has one fasta or fa
        if not url:
            #check to ensure the size on the OS the same as the reported.
            reported_file_size = request.POST.get('reported_file_size', False)

            try:
                uploaded_file_size = str(os.path.getsize(reference_path))
            except OSError:
                return render_to_json({
                    "status": "The FASTA temporary files was not found",
                    "error": True
                })

            if reported_file_size != uploaded_file_size:
                why_delete = "The file you uploaded differs from the expected size. This is due to an error uploading."

            if not (fasta.lower().endswith(".fasta")
                    or fasta.lower().endswith(".zip")):
                why_delete = "The file you uploaded does not have a .fasta or .zip extension.  It must be a plain text fasta file or a Zip compressed fasta."

            if why_delete:
                # remove the bad upload before reporting the failure
                try:
                    os.remove(reference_path)
                except OSError:
                    why_delete += " The FASTA file could not be deleted."
                return render_to_json({"status": why_delete, "error": True})

        #Make an genome ref object
        if ReferenceGenome.objects.filter(short_name=short_name,
                                          index_version=settings.TMAP_VERSION):
            #check to see if the genome already exists in the database with the same version
            return render_to_json({
                "status":
                "Failed - Genome with this short name and index version already exist.",
                "error": True
            })
        ref_genome = ReferenceGenome()
        ref_genome.name = name
        ref_genome.short_name = short_name
        ref_genome.version = version
        ref_genome.date = datetime.datetime.now()
        ref_genome.notes = notes
        ref_genome.status = "queued"
        ref_genome.enabled = False
        ref_genome.index_version = settings.TMAP_VERSION

        #before the object is saved we should ping the xml-rpc server to see if it is alive.
        try:
            conn = client.connect(JOBSERVER_HOST, settings.JOBSERVER_PORT)
            #just check uptime to make sure the call does not fail
            conn.uptime()
            logger.debug('Connected to ionJobserver process.')
        except (socket.error, xmlrpclib.Fault):
            return render_to_json({
                "status":
                "Unable to connect to ionJobserver process.  You may need to restart ionJobserver",
                "error": True
            })

        #if the above didn't fail then we can save the object
        #this object must be saved before the tmap call is made
        ref_genome.save()
        logger.debug('Saved ReferenceGenome %s' % ref_genome.__dict__)

        #kick off the anaserve tmap xmlrpc call
        import traceback
        try:
            conn = client.connect(JOBSERVER_HOST, settings.JOBSERVER_PORT)
            tmap_bool, tmap_status = conn.tmap(str(ref_genome.id), fasta,
                                               short_name, name, version,
                                               read_exclude_length,
                                               settings.TMAP_VERSION)
            logger.debug('ionJobserver process reported %s %s' %
                         (tmap_bool, tmap_status))
        except (socket.error, xmlrpclib.Fault):
            #delete the genome object, because it was not sucessful
            ref_genome.delete()
            return render_to_json({
                "status": "Error with index creation",
                "error": traceback.format_exc()
            })

        if not tmap_bool:
            ref_genome.delete()
            return render_to_json({"status": tmap_status, "error": True})

        return render_to_json({
            "status":
            "The genome index is being created.  This might take a while, check the status on the references tab. \
                                You are being redirected there now.",
            "error": False
        })

    elif request.method == "GET":
        ctx = RequestContext(request, {})
        return render_to_response(
            "rundb/configure/modal_references_new_genome.html",
            context_instance=ctx)
Esempio n. 18
0
def new_genome(request):
    """This is the page to create a new genome. 
    """
    def is_fasta(filename):
        ext = os.path.splitext(filename)[1]
        return ext.lower() in ['.fasta', '.fas', '.fa', '.seq']

    if request.method == "POST":
        # parse the data sent in
        #required
        name = request.POST.get('name', False)
        short_name = request.POST.get('short_name', False)
        fasta = request.POST.get('target_file', False)
        version = request.POST.get('version', "")
        notes = request.POST.get('notes', "")

        #optional
        read_exclude_length = request.POST.get('read_exclude_length', False)

        #URL download
        url = request.POST.get('url', False)
        reference_path = os.path.join(settings.TEMP_PATH, fasta)
        why_delete = ""

        #if any of those were false send back a failed message
        if not all((name, short_name, fasta)):
            return render_to_json({
                "status": "Form validation failed",
                "error": True
            })

        if not set(short_name).issubset(
                "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
        ):
            return render_to_json({
                "status":
                "The short name has invalid characters. The valid values are letters, numbers, and underscores.",
                "error": True
            })

        #TODO: check to make sure the zip file only has one fasta or fa
        if not url:
            #check to ensure the size on the OS the same as the reported.
            reported_file_size = request.POST.get('reported_file_size', False)

            try:
                uploaded_file_size = str(os.path.getsize(reference_path))
            except OSError:
                return render_to_json({
                    "status": "The FASTA temporary files was not found",
                    "error": True
                })

            if reported_file_size != uploaded_file_size:
                why_delete = "The file you uploaded differs from the expected size. This is due to an error uploading."

            if not (is_fasta(fasta) or fasta.lower().endswith(".zip")):
                why_delete = "The file you uploaded does not have a FASTA or ZIP extension.  It must be a plain text a Zip compressed fasta file."
        is_zip = zipfile.is_zipfile(reference_path)
        if is_zip:
            zip_file = zipfile.ZipFile(reference_path, 'r')
            files = zip_file.namelist()
            # MAC OS zip is being compressed with __MACOSX folder Ex: '__MACOSX/', '__MACOSX/._contigs_2.fasta'.
            # Filter out those files and Upload only FASTA file
            files = [x for x in files if not 'MACOSX' in x]
            zip_file.close()
        else:
            files = [fasta]
        fasta_files = filter(lambda x: is_fasta(x), files)

        if len(fasta_files) != 1:
            why_delete = "Error: upload must contain exactly one fasta file"
        else:
            target_fasta_file = fasta_files[0]

        if why_delete:
            try:
                os.remove(reference_path)
            except OSError:
                why_delete += " The FASTA file could not be deleted."
            logger.warning("User uploaded bad fasta file: " + str(why_delete))
            return render_to_json({"status": why_delete, "error": True})

        #Make an genome ref object
        if ReferenceGenome.objects.filter(short_name=short_name,
                                          index_version=settings.TMAP_VERSION):
            #check to see if the genome already exists in the database with the same version
            return render_to_json({
                "status":
                "Failed - Genome with this short name and index version already exist.",
                "error": True
            })
        ref_genome = ReferenceGenome()
        ref_genome.name = name
        ref_genome.short_name = short_name
        ref_genome.version = version
        ref_genome.notes = notes
        ref_genome.status = "preprocessing"
        ref_genome.enabled = False
        ref_genome.index_version = settings.TMAP_VERSION
        ref_genome.save()
        logger.debug("Created new reference: %d/%s" %
                     (ref_genome.pk, ref_genome))

        temp_dir = tempfile.mkdtemp(suffix=short_name, dir=settings.TEMP_PATH)
        temp_upload_path = os.path.join(temp_dir, fasta)
        os.chmod(temp_dir, 0777)
        os.rename(reference_path, temp_upload_path)
        monitor = FileMonitor(local_dir=temp_dir, name=fasta)
        monitor.save()
        ref_genome.file_monitor = monitor
        ref_genome.reference_path = temp_upload_path
        ref_genome.save()

        index_task = tasks.build_tmap_index.subtask((ref_genome.id, ),
                                                    immutable=True)
        if is_zip:
            result = tasks.unzip_reference.apply_async(
                args=(ref_genome.id, target_fasta_file), link=index_task)
        else:
            result = tasks.copy_reference.apply_async(args=(ref_genome.id, ),
                                                      link=index_task)
        ref_genome.status = "queued"
        ref_genome.celery_task_id = result.task_id
        ref_genome.save()
        return render_to_json({
            "status":
            "The genome index is being created.  This might take a while, check the status on the references tab. \
                                You are being redirected there now.",
            "error": False
        })

    elif request.method == "GET":
        ctx = RequestContext(request, {})
        return render_to_response(
            "rundb/configure/modal_references_new_genome.html",
            context_instance=ctx)
Esempio n. 19
0
def new_genome(request):
    """Create a new reference genome from an uploaded FASTA or ZIP archive.

    POST: validates the form fields and the staged upload under
    settings.TEMP_PATH, creates a ReferenceGenome record, then queues
    Celery tasks (copy or unzip, followed by a TMAP index build).  All
    outcomes are reported as JSON via render_to_json.
    GET: renders the "new genome" modal template.
    """

    if request.method == "POST":
        # parse the data sent in
        #required
        name = request.POST.get('name', False)
        short_name = request.POST.get('short_name', False)
        fasta = request.POST.get('target_file', False)
        version = request.POST.get('version', "")
        notes = request.POST.get('notes', "")

        #optional
        read_exclude_length = request.POST.get('read_exclude_length', False)

        #URL download
        url = request.POST.get('url', False)
        # NOTE(review): 'fasta' is used to build this path before the
        # required-field check below, so a missing 'target_file' raises
        # TypeError here instead of returning the JSON validation error.
        reference_path = os.path.join(settings.TEMP_PATH, fasta)
        why_delete = ""

        #if any of those were false send back a failed message
        if not all((name, short_name, fasta)):
            return render_to_json({"status": "Form validation failed", "error": True})

        if not set(short_name).issubset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"):
            return render_to_json({"status": "The short name has invalid characters. The valid values are letters, numbers, and underscores.", "error": True})

        #TODO: check to make sure the zip file only has one fasta or fa
        if not url:
            #check to ensure the size on the OS the same as the reported.
            reported_file_size = request.POST.get('reported_file_size', False)

            try:
                uploaded_file_size = str(os.path.getsize(reference_path))
            except OSError:
                return render_to_json({"status": "The FASTA temporary files was not found", "error": True})

            if reported_file_size != uploaded_file_size:
                why_delete = "The file you uploaded differs from the expected size. This is due to an error uploading."

            if not (fasta.lower().endswith(".fasta") or fasta.lower().endswith(".zip")):
                why_delete = "The file you uploaded does not have a .fasta or .zip extension.  It must be a plain text fasta file or a Zip compressed fasta."
        is_zip = zipfile.is_zipfile(reference_path)
        if is_zip:
            zip_file = zipfile.ZipFile(reference_path, 'r')
            files = zip_file.namelist()
            # MAC OS zip is being compressed with __MACOSX folder Ex: '__MACOSX/', '__MACOSX/._contigs_2.fasta'.
            # Filter out those files and Upload only FASTA file
            files = [x for x in files if not 'MACOSX' in x]
            zip_file.close()
        else:
            files = [fasta]
        # Exactly one FASTA is allowed per upload (Python 2: filter
        # returns a list here).
        fasta_files = filter(lambda x: x.endswith('.fa') or x.endswith('.fasta'), files)

        if len(fasta_files) != 1:
            why_delete = "Error: upload must contain exactly one fasta file"
        else:
            target_fasta_file = fasta_files[0]

        # Any validation failure: remove the staged upload and report why.
        if why_delete:
            try:
                os.remove(reference_path)
            except OSError:
                why_delete += " The FASTA file could not be deleted."
            logger.warning("User uploaded bad fasta file: " + str(why_delete))
            return render_to_json({"status": why_delete, "error": True})

        #Make an genome ref object
        if ReferenceGenome.objects.filter(short_name=short_name, index_version=settings.TMAP_VERSION):
            #check to see if the genome already exists in the database with the same version
            return render_to_json({"status": "Failed - Genome with this short name and index version already exist.", "error": True})
        ref_genome = ReferenceGenome()
        ref_genome.name = name
        ref_genome.short_name = short_name
        ref_genome.version = version
        ref_genome.notes = notes
        ref_genome.status = "preprocessing"
        ref_genome.enabled = False
        ref_genome.index_version = settings.TMAP_VERSION
        ref_genome.save()
        logger.debug("Created new reference: %d/%s" % (ref_genome.pk, ref_genome))

        # Stage the upload in its own world-writable temp dir so the
        # Celery workers can read and modify it.
        temp_dir = tempfile.mkdtemp(suffix=short_name, dir=settings.TEMP_PATH)
        temp_upload_path = os.path.join(temp_dir, fasta)
        os.chmod(temp_dir, 0777)
        os.rename(reference_path, temp_upload_path)
        monitor = FileMonitor(
            local_dir=temp_dir,
            name=fasta
        )
        monitor.save()
        ref_genome.file_monitor = monitor
        ref_genome.reference_path = temp_upload_path
        ref_genome.save()


        # Build the index only after the linked staging task (unzip or
        # copy) has completed.
        index_task = tasks.build_tmap_index.subtask((ref_genome.id,), immutable=True)
        if is_zip:
            result = tasks.unzip_reference.apply_async(
                args=(ref_genome.id, target_fasta_file),
                link=index_task
            )
        else:
            result = tasks.copy_reference.apply_async(
                args=(ref_genome.id,), 
                link=index_task
            )
        ref_genome.status = "queued"
        ref_genome.celery_task_id = result.task_id
        ref_genome.save()
        return render_to_json({"status": "The genome index is being created.  This might take a while, check the status on the references tab. \
                                You are being redirected there now.", "error": False})

    elif request.method == "GET":
        ctx = RequestContext(request, {})
        return render_to_response("rundb/configure/modal_references_new_genome.html", context_instance=ctx)
Esempio n. 20
0
def write_plupload(request, pub_name):
    """file upload for plupload

    Receives one (possibly chunked) file for publisher *pub_name*.
    Chunks are appended to a temp file under /results/referenceLibrary/temp;
    when the final chunk arrives the file is handed to the publisher via
    move_upload() and the publisher scripts are started asynchronously.
    Returns JSON with chunk bookkeeping and the new ContentUpload id.
    """
    logger.info("Starting write plupload")

    pub = models.Publisher.objects.get(name=pub_name)
    meta = request.POST.get("meta", "{}")
    logger.debug("%s" % meta)

    if request.method == "POST":
        name = request.REQUEST.get("name", "")
        uploaded_file = request.FILES["file"]
        if not name:
            name = uploaded_file.name
        logger.debug("plupload name = '%s'" % name)

        try:
            # Pick the validation labels from the meta JSON: hotspot
            # uploads are validated differently from target regions.
            labelsContent = labels.TargetRegionsContent  # default case
            if RepresentsJSON(meta):
                metaJson = json.loads(meta)
                labelsContent = (labels.HotspotsContent
                                 if metaJson and metaJson["hotspot"] else
                                 labels.TargetRegionsContent)

            file_name = validate_plupload(request, pub_name, name,
                                          labelsContent)
        except Exception as err:
            # Validation problems come back as a 400 with the message.
            return HttpResponseBadRequest(unicode(err))

        upload_dir = "/results/referenceLibrary/temp"
        if not os.path.exists(upload_dir):
            return render_to_json(
                {"error": validation.missing_error(upload_dir)})

        dest_path = os.path.join(upload_dir, file_name)

        logger.debug("plupload destination = '%s'" % dest_path)

        # plupload sends a 0-based chunk index and the total chunk count.
        chunk = request.REQUEST.get("chunk", "0")
        chunks = request.REQUEST.get("chunks", "0")

        logger.debug("plupload chunk %s %s of %s" %
                     (str(type(chunk)), str(chunk), str(chunks)))

        debug = [chunk, chunks]

        # First chunk truncates the destination file; later chunks append.
        with open(dest_path, ("wb" if chunk == "0" else "ab")) as f:
            for content in uploaded_file.chunks():
                logger.debug("content chunk = '%d'" % len(content))
                f.write(content)

        my_contentupload_id = None
        # Last chunk received: register the upload and start the publisher.
        if int(chunk) + 1 >= int(chunks):
            try:
                meta = json.loads(meta)
                meta["username"] = request.user.username
                upload = move_upload(pub, dest_path, file_name,
                                     json.dumps(meta))
                async_upload = run_pub_scripts.delay(pub, upload)
                my_contentupload_id = upload.id
            except Exception as err:
                # Best-effort: failures are logged and the id stays None.
                logger.exception(
                    "There was a problem during upload of a file for a publisher."
                )
            else:
                logger.info("Successfully pluploaded %s" % name)

        logger.debug("plupload done")
        return render_to_json({
            "chunk posted": debug,
            "contentupload_id": my_contentupload_id
        })

    else:
        return render_to_json({
            "method":
            i18n_errors.fatal_unsupported_http_method_expected(
                request.method, "POST")
        })
Esempio n. 21
0
def write_plupload(request, pub_name):
    """file upload for plupload

    Older variant of the chunked plupload receiver: appends chunks to
    /results/referenceLibrary/temp/<name> and, once the last chunk has
    arrived, hands the file plus the raw 'meta' request value to
    move_upload() and starts the publisher scripts asynchronously.
    """
    logger.info("Starting write plupload")

    pub = models.Publisher.objects.get(name=pub_name)
    logger.debug("%s %s" %
                 (str(type(request.REQUEST['meta'])), request.REQUEST['meta']))

    logger.debug("Publisher Plupload started")
    if request.method == 'POST':
        name = request.REQUEST.get('name', '')
        uploaded_file = request.FILES['file']
        if not name:
            name = uploaded_file.name
        logger.debug("plupload name = '%s'" % name)

        #check to see if a user has uploaded a file before, and if they have
        #not, make them a upload directory

        upload_dir = "/results/referenceLibrary/temp"

        if not os.path.exists(upload_dir):
            return render_to_json({"error": "upload path does not exist"})

        dest_path = os.path.join(upload_dir, name)

        logger.debug("plupload destination = '%s'" % dest_path)

        # plupload sends a 0-based chunk index and the total chunk count.
        chunk = request.REQUEST.get('chunk', '0')
        chunks = request.REQUEST.get('chunks', '0')

        logger.debug("plupload chunk %s %s of %s" %
                     (str(type(chunk)), str(chunk), str(chunks)))

        debug = [chunk, chunks]

        # First chunk truncates the destination file; later chunks append.
        with open(dest_path, ('wb' if chunk == '0' else 'ab')) as f:
            for content in uploaded_file.chunks():
                logger.debug("content chunk = '%d'" % len(content))
                f.write(content)

        my_contentupload_id = None
        # Last chunk received: register the upload and start the publisher.
        if int(chunk) + 1 >= int(chunks):
            try:
                upload = move_upload(pub, dest_path, name,
                                     request.REQUEST['meta'])
                async_upload = run_pub_scripts.delay(pub, upload)
                my_contentupload_id = upload.id
            except Exception as err:
                # Best-effort: failures are logged and the id stays None.
                logger.exception(
                    "There was a problem during upload of a file for a publisher."
                )
            else:
                logger.info("Successfully pluploaded %s" % name)

        logger.debug("plupload done")
        return render_to_json({
            "chunk posted": debug,
            "contentupload_id": my_contentupload_id
        })

    else:
        return render_to_json({"method": "only post here"})
Esempio n. 22
0
def download_genome(request):
    """Import a preloaded Ion reference genome and/or its annotation files.

    POST: decodes the base64/JSON 'reference_meta' payload and either
    downloads only the missing annotation files (when 'ref_annot_update'
    is set and annotation metadata was posted) or starts the full
    reference download, then redirects to the references download page.
    GET: builds the reference/annotation download status page, including
    whether it should auto-refresh while any job is still in progress.
    """
    # called by "Import Preloaded Ion References"
    if request.method == "POST":
        reference_meta = request.POST.get("reference_meta", None)
        ref_annot_update = request.POST.get("ref_annot_update", None)
        reference_args = json.loads(base64.b64decode(reference_meta))
        annotation_meta = request.POST.get("missingAnnotation_meta", None)
        reference_mask_info = request.POST.get("reference_mask", None)

        # Default both to "no annotation": previously these names were
        # bound only inside the if-branch, so a request without
        # 'missingAnnotation_meta' raised NameError below.
        annotation_data = None
        annotation_lists = None
        if annotation_meta:
            annotation_data = json.loads(base64.b64decode(annotation_meta))
            annotation_lists = annotation_data

        # Download and register only the Ref Annotation file if Reference Genome is already Imported
        # If not, download refAnnot file and Reference Genome asynchronously
        if annotation_data and ref_annot_update:
            logger.debug(
                "Downloading Annotation File {0} with meta {1}".format(
                    annotation_data, reference_meta))
            if annotation_lists:
                download_genome_annotation(annotation_lists, reference_args)
        else:
            url = request.POST.get("reference_url", None)
            logger.debug("Downloading {0} with meta {1}".format(
                url, reference_meta))
            if url is not None:
                if annotation_lists:
                    download_genome_annotation(annotation_lists,
                                               reference_args)

                try:
                    new_reference_genome(
                        reference_args,
                        url,
                        reference_mask_filename=reference_mask_info)
                except Exception as e:
                    return render_to_json({"status": str(e), "error": True})

        return HttpResponseRedirect(
            urlresolvers.reverse("references_genome_download"))

    elif request.method == "GET":
        references = get_references() or []
        downloads = FileMonitor.objects.filter(
            tags__in=["reference", "bedfile"]).order_by('-created')
        downloads_annot = FileMonitor.objects.filter(
            tags__contains="reference_annotation").order_by('-created')

        (references, downloads_annot) = get_annotation(references,
                                                       downloads_annot)

        # update BED files available for pre-loaded references
        for ref in references:
            if ref['installed']:
                bedfiles = []
                for bedfile in ref.get('bedfiles', []):
                    status = get_bedfile_status(bedfile,
                                                ref['meta']['short_name'])
                    if status != "_installed":
                        bedfile['status'] = status
                        bedfiles.append(bedfile)

                ref['bedfiles'] = bedfiles

        # Refresh the page while any download or indexing job is still in
        # one of the in-progress states (matched case-insensitively by
        # including the lowercase variants).
        _in_progress_status = [
            "Queued", "Starting", "Downloading", "Preprocessing", "Indexing"
        ]
        _in_progress_status += [s.lower() for s in _in_progress_status]
        downloading = downloads.filter(
            status__in=_in_progress_status) | downloads_annot.filter(
                status__in=_in_progress_status)
        id_hashes = [
            r['meta']['identity_hash'] for r in references
            if r['meta']['identity_hash']
        ]
        processing = ReferenceGenome.objects.filter(
            identity_hash__in=id_hashes, status__in=_in_progress_status)

        ctx = {
            'downloads': downloads,
            'downloads_annot': downloads_annot,
            'references': references,
            'refresh_progress': downloading.count() > 0
            or processing.count() > 0
        }
        return render_to_response("rundb/configure/reference_download.html",
                                  ctx,
                                  context_instance=RequestContext(request))
Esempio n. 23
0
def new_genome(request):
    """This is the page to create a new genome. The XML-RPC server is ionJobServer.

    POST: validates the uploaded FASTA, creates a ReferenceGenome record,
    then asks the ionJobServer over XML-RPC to build the TMAP index;
    every outcome is reported as JSON via render_to_json.
    GET: renders the "new genome" modal template.
    """

    if request.method == "POST":
        # parse the data sent in

        #required
        name = request.POST.get('name', False)
        short_name = request.POST.get('short_name', False)
        fasta = request.POST.get('target_file', False)
        version = request.POST.get('version', False)
        notes = request.POST.get('notes', "")

        #optional
        read_sample_size = request.POST.get('read_sample_size', False)
        read_exclude_length = request.POST.get('read_exclude_length', False)

        #URL download
        url = request.POST.get('url', False)

        error_status = ""
        # NOTE(review): plain concatenation assumes REFERENCE_LIBRARY_TEMP_DIR
        # ends with a path separator, and a missing 'target_file' (False)
        # raises TypeError here before the validation below — confirm.
        reference_path = REFERENCE_LIBRARY_TEMP_DIR + fasta

        why_delete = ""

        #if any of those were false send back a failed message
        if not all((name, short_name, fasta, version)):
            return render_to_json({"status": "Form validation failed", "error": True})

        if not set(short_name).issubset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"):
            return render_to_json({"status": "The short name has invalid characters. The valid values are letters, numbers, and underscores.", "error": True})

        #TODO: check to make sure the zip file only has one fasta or fa
        if not url:
            #check to ensure the size on the OS the same as the reported.
            reported_file_size = request.POST.get('reported_file_size', False)

            try:
                uploaded_file_size = str(os.path.getsize(reference_path))
            except OSError:
                return render_to_json({"status": "The FASTA temporary files was not found", "error": True})

            if reported_file_size != uploaded_file_size:
                why_delete = "The file you uploaded differs from the expected size. This is due to an error uploading."

            if not (fasta.lower().endswith(".fasta") or fasta.lower().endswith(".zip")):
                why_delete = "The file you uploaded does not have a .fasta or .zip extension.  It must be a plain text fasta file or a Zip compressed fasta."

            # Any validation failure: remove the temp upload and report why.
            if why_delete:
                try:
                    os.remove(reference_path)
                except OSError:
                    why_delete += " The FASTA file could not be deleted."
                return render_to_json({"status": why_delete, "error": True})

        #Make an genome ref object
        if ReferenceGenome.objects.filter(short_name=short_name, index_version=settings.TMAP_VERSION):
            #check to see if the genome already exists in the database with the same version
            return render_to_json({"status": "Failed - Genome with this short name and index version already exist.", "error": True})
        ref_genome = ReferenceGenome()
        ref_genome.name = name
        ref_genome.short_name = short_name
        ref_genome.version = version
        ref_genome.date = datetime.datetime.now()
        ref_genome.notes = notes
        ref_genome.status = "queued"
        ref_genome.enabled = False
        ref_genome.index_version = settings.TMAP_VERSION

        #before the object is saved we should ping the xml-rpc server to see if it is alive.
        try:
            conn = client.connect(JOBSERVER_HOST, settings.JOBSERVER_PORT)
            #just check uptime to make sure the call does not fail
            conn.uptime()
            logger.debug('Connected to ionJobserver process.')
        except (socket.error, xmlrpclib.Fault):
            return render_to_json({"status": "Unable to connect to ionJobserver process.  You may need to restart ionJobserver", "error": True})

        #if the above didn't fail then we can save the object
        #this object must be saved before the tmap call is made
        ref_genome.save()
        logger.debug('Saved ReferenceGenome %s' % ref_genome.__dict__)

        #kick off the anaserve tmap xmlrpc call
        import traceback
        try:
            conn = client.connect(JOBSERVER_HOST, settings.JOBSERVER_PORT)
            tmap_bool, tmap_status = conn.tmap(str(ref_genome.id), fasta, short_name, name, version,
                                               read_sample_size, read_exclude_length, settings.TMAP_VERSION)
            logger.debug('ionJobserver process reported %s %s' % (tmap_bool, tmap_status))
        except (socket.error, xmlrpclib.Fault):
            #delete the genome object, because it was not sucessful
            ref_genome.delete()
            return render_to_json({"status": "Error with index creation", "error": traceback.format_exc()})

        # The job server can also report failure without raising; roll
        # back the record in that case too.
        if not tmap_bool:
            ref_genome.delete()
            return render_to_json({"status": tmap_status, "error": True})

        return render_to_json({"status": "The genome index is being created.  This might take a while, check the status on the references tab. \
                                You are being redirected there now.", "error": False})

    elif request.method == "GET":
        ctx = RequestContext(request, {})
        return render_to_response("rundb/configure/modal_references_new_genome.html", context_instance=ctx)