Example #1
def generate_file_attributes(file_id):
    chunked_upload = ChunkedUpload.objects.get(id=int(file_id))

    header_class = "Headingx"
    cell_class = "Cellx"
    row_class = "Rowx"

    size = u.filesize_toString(chunked_upload.offset)

    uploaded_on = (str(chunked_upload.completed_on)).split(".")[0]
    dt_format1 = '%Y-%m-%d %H:%M:%S'
    dt_format2 = '%d-%m-%Y %H:%M:%S'
    uploaded_on = datetime.datetime.strptime(str(uploaded_on), dt_format1).strftime(dt_format2)
    hash = chunked_upload.hash

    html_tag = "<div class='Tablex'>"
    # row
    html_tag += "<div class='{row_class!s}'>".format(**locals())
    html_tag += "<div class='{cell_class!s}'><p><strong>Size:</strong></p></div>".format(**locals())
    html_tag += "<div class='{cell_class!s}'><p>{size!s}</p></div>".format(**locals())
    html_tag += "</div>"
    # row
    html_tag += "<div class='{row_class!s}'>".format(**locals())
    html_tag += "<div class='{cell_class!s}'><p><strong>Uploaded on:</strong></p></div>".format(**locals())
    html_tag += "<div class='{cell_class!s}'><p>{uploaded_on!s}</p></div>".format(**locals())
    html_tag += "</div>"
    # row
    html_tag += "<div class='{row_class!s}'>".format(**locals())
    html_tag += "<div class='{cell_class!s}'><p><strong>Hash:</strong></p></div>".format(**locals())
    html_tag += "<div class='{cell_class!s}'><p>{hash!s}</p></div>".format(**locals())
    html_tag += "</div>"

    html_tag += "</div>"

    return html_tag
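A minimal sketch of how a fragment helper like this is typically returned from a Django view; the view name and wiring below are assumptions for illustration, not part of the example above.

from django.http import HttpResponse

def file_attributes(request):
    # the helper returns an HTML fragment, so it can be sent straight back to the caller
    file_id = request.GET['file_id']
    return HttpResponse(generate_file_attributes(file_id), content_type='text/html')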
Example #2
def populate_exp_modal(request):
    # this method gets the current files associated with an ENA experiment or group of ENA experiments
    # and populates a table in the upload modal dialogue along with delete functionality
    data_modal_id = request.GET.get('data_modal_id')
    # get experiments
    exps = EnaCollection().get_experiments_by_modal_id(data_modal_id)

    output_files = []

    for exp in exps:
        # for each experiment get a list of the associated files
        files = EnaCollection().get_files_by_experiment_id(exp["experiments"][0]["_id"])
        for file in files:
            # get chunked upload object
            ch = ChunkedUpload.objects.get(id=file['files']["chunked_upload_id"])
            # now populate output object
            f = {}
            f['id'] = str(ch.id)
            f['name'] = ch.filename
            f['size'] = u.filesize_toString(ch.offset)
            f['md5'] = file['files']["hash"]
            f['data_modal_id'] = data_modal_id
            f['panel_id'] = exp["experiments"][0]["panel_id"]
            f['experiment_id'] = str(exp["experiments"][0]["_id"])
            output_files.append(f)

    return HttpResponse(jsonpickle.encode(output_files), content_type='text/plain')
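Because each f dict built above contains only plain strings, the payload also round-trips through the standard json module; a small sketch of an equivalent serialization (jsonpickle is only strictly needed for arbitrary Python objects, and the helper name here is illustrative):

import json
from django.http import HttpResponse

def files_to_response(output_files):
    # flat dicts of strings serialize identically with the standard library
    return HttpResponse(json.dumps(output_files), content_type='application/json')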
Example #3
    def create_transfer(self, submission_id, file_path=None):
        # before creating a new transfer record for this submission, remove all others
        remote_record = self.get_by_sub_id(submission_id)
        if remote_record:
            self.delete_transfer(str(remote_record["_id"]))

        fields = data_utils.json_to_pytype(
            DB_TEMPLATES['REMOTE_FILE_COLLECTION'])
        fields['submission_id'] = submission_id
        fields['profile_id'] = self.profile_id
        fields['file_path'] = file_path
        transfer_time = datetime.now().strftime("%d-%m-%Y %H:%M:%S")
        fields["commenced_on"] = transfer_time
        fields["current_time"] = transfer_time
        fields["transfer_rate"] = ""

        if file_path:
            d = DataFile().GET(submission_id)
            chunked_upload = ChunkedUpload.objects.get(id=int(d['file_id']))
            fields["file_size_bytes"] = u.filesize_toString(
                chunked_upload.offset)

        doc = self.RemoteFileCollection.insert(fields)

        # return inserted record
        df = self.GET(str(doc))
        return df
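Since commenced_on and current_time are stored as strings in the "%d-%m-%Y %H:%M:%S" format, any later rate or progress calculation has to parse them back with the same format; a minimal sketch (the helper name is illustrative):

from datetime import datetime

TRANSFER_TIME_FORMAT = "%d-%m-%Y %H:%M:%S"

def elapsed_seconds(commenced_on, current_time):
    # parse the stored strings with the same format used when they were written
    started = datetime.strptime(commenced_on, TRANSFER_TIME_FORMAT)
    now = datetime.strptime(current_time, TRANSFER_TIME_FORMAT)
    return (now - started).total_seconds()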
Example #4
def zip_file(request):
    # need to get a reference to the file to zip
    file_id = request.GET['file_id']
    print("zip started " + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)

    # get the name of the file to zip and change its suffix to .gz
    output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
    output_file_name = file_obj.filename + '.gz'
    try:
        # open the file as a gzip archive... set compression level
        temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
        myzip = gzip.open(temp_name, 'wb', compresslevel=1)
        src = open(output_file_location, 'r')

        # write input file to gzip archive in n byte chunks
        n = 100000000
        for chunk in iter(lambda: src.read(n), ''):
            myzip.write(bytes(chunk, 'UTF-8'))
    finally:
        myzip.close()
        src.close()

    print('zip complete ' + file_id)
    # now need to delete the old file and update the file record with the new file
    new_file_name = output_file_location + '.gz'
    os.rename(temp_name, new_file_name)
    os.remove(output_file_location)

    # calculate new file size
    stats = os.stat(new_file_name)
    new_file_size = stats.st_size / 1000 / 1000

    # update filename
    file_obj.filename = output_file_name
    file_obj.file.name = new_file_name

    # update file size
    file_obj.offset = stats.st_size
    file_obj.save()

    out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}

    # update record in mongo
    record_object = DataFile().get_by_file_id(file_id)
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(file_obj.offset)
    auto_fields[DataFile().get_qualified_field("name")] = output_file_name
    auto_fields[DataFile().get_qualified_field("file_location")] = new_file_name

    BrokerDA(target_id=str(record_object.get("_id", str())),
             component="datafile",
             auto_fields=auto_fields
             ).do_save_edit()

    out = jsonpickle.encode(out)
    return HttpResponse(out, content_type='text/plain')
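The loop above opens the source in text mode and re-encodes each chunk to bytes, which only works for text files; a minimal alternative sketch that streams the file in binary mode with context managers (the function name is an assumption, not part of the project):

import gzip
import shutil

def gzip_in_chunks(src_path, dest_path, chunk_size=100000000):
    # binary mode avoids decode errors on non-text uploads; copyfileobj streams in chunks
    with open(src_path, 'rb') as src, gzip.open(dest_path, 'wb', compresslevel=1) as dst:
        shutil.copyfileobj(src, dst, length=chunk_size)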
Example #5
def inspect_file(request):
    # utility method to examine a file and return meta-data to the frontend
    output_dict = {'file_type': 'unknown', 'do_compress': False}
    # get reference to file
    file_id = request.GET['file_id']
    chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
    file_name = os.path.join(MEDIA_ROOT, chunked_upload.file.name)

    # size threshold to determine if a file should be compressed
    zip_threshold = 200000000  # size in bytes

    # check if file is compressed
    is_zipped = u.is_gzipped(file_name)

    if chunked_upload.offset >= zip_threshold and not is_zipped:
        output_dict['do_compress'] = True

    # check for file type
    if u.is_pdf_file(file_name):
        output_dict['file_type'] = 'pdf'
    else:
        try:
            if (u.is_fastq_file(file_name)):
                output_dict['file_type'] = 'fastq'
            elif (u.is_sam_file(file_name)):
                output_dict['file_type'] = 'sam'
            elif (u.is_bam_file(file_name)):
                output_dict['file_type'] = 'bam'
        except:
            output_dict['file_type'] = ''

    # add datafile schema
    chunked_upload.type = output_dict['file_type']
    chunked_upload.save()

    # ...and obtain the inserted record
    profile_id = request.session['profile_id']
    df = DataFile(profile_id).save_datafile(file_id)
    output_dict['data_file_id'] = str(df["_id"])
    output_dict['table_data'] = htags.generate_copo_datafiles_data(profile_id, df)

    out = jsonpickle.encode(output_dict)
    return HttpResponse(out, content_type='json')
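The u.is_gzipped helper is not shown in these examples; one common way to implement such a check is to compare the file's first two bytes against the gzip magic number, as in this sketch:

def is_gzipped(path):
    # gzip streams start with the magic bytes 0x1f 0x8b
    with open(path, 'rb') as fh:
        return fh.read(2) == b'\x1f\x8b'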
Example #6
def save_ena_sample_callback(request):
    # get sample form list, attribute list, and the collection id
    # collection_id = request.GET['collection_id']
    details_id = request.GET['study_id']
    sample_id = request.GET['sample_id']
    # get details of the user-entered sample
    sample = jsonpickle.decode(request.GET['sample_details'])
    attr = jsonpickle.decode(request.GET['sample_attr'])

    if sample_id == '':
        EnaCollection().add_sample_to_study(sample, attr, details_id)
    else:
        EnaCollection().update_sample_in_study(sample, attr, details_id, sample_id)

    # now clear attributes and re-add the new set
    out = u.get_sample_html_from_details_id(details_id)

    return HttpResponse(out, content_type='html')
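The sample_details and sample_attr query parameters arrive as jsonpickle-encoded strings; a sketch of what the calling side might send (the field names below are illustrative assumptions, the real ones come from the ENA collection schema):

import jsonpickle

sample = {'title': 'sample 1', 'taxon_id': '9606'}   # illustrative fields only
attributes = [{'name': 'tissue', 'value': 'leaf'}]   # illustrative fields only

params = {
    'study_id': 'some-study-id',
    'sample_id': '',  # empty string adds a new sample, otherwise the existing one is updated
    'sample_details': jsonpickle.encode(sample),
    'sample_attr': jsonpickle.encode(attributes),
}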
Example #7
    def create_transfer(self, submission_id, file_path=None):
        fields = data_utils.json_to_pytype(DB_TEMPLATES['REMOTE_FILE_COLLECTION'])
        fields['submission_id'] = submission_id
        fields['profile_id'] = self.profile_id
        fields['file_path'] = file_path
        transfer_time = datetime.now().strftime("%d-%m-%Y %H:%M:%S")
        fields["commenced_on"] = transfer_time
        fields["current_time"] = transfer_time
        fields["transfer_rate"] = []

        if file_path:
            d = DataFile().GET(submission_id)
            chunked_upload = ChunkedUpload.objects.get(id=int(d['file_id']))
            fields["file_size_bytes"] = u.filesize_toString(chunked_upload.offset)

        doc = self.RemoteFileCollection.insert(fields)

        # return inserted record
        df = self.GET(str(doc))
        return df
Example #8
def zip_file(request):
    # need to get a reference to the file to zip
    file_id = request.GET['file_id']
    print("zip started " + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)

    # get the name of the file to zip and change its suffix to .gz
    output_file_location = os.path.join(settings.MEDIA_ROOT,
                                        file_obj.file.name)
    output_file_name = file_obj.filename + '.gz'
    try:
        # open the file as a gzip archive... set compression level
        temp_name = os.path.join(settings.MEDIA_ROOT,
                                 str(uuid.uuid4()) + '.tmp')
        myzip = gzip.open(temp_name, 'wb', compresslevel=1)
        src = open(output_file_location, 'r')

        # write input file to gzip archive in n byte chunks
        n = 100000000
        for chunk in iter(lambda: src.read(n), ''):
            myzip.write(bytes(chunk, 'UTF-8'))
    finally:
        myzip.close()
        src.close()

    print('zip complete ' + file_id)
    # now need to delete the old file and update the file record with the new file
    new_file_name = output_file_location + '.gz'
    os.rename(temp_name, new_file_name)
    os.remove(output_file_location)

    # calculate new file size
    stats = os.stat(new_file_name)
    new_file_size = stats.st_size / 1000 / 1000

    # update filename
    file_obj.filename = output_file_name
    file_obj.file.name = new_file_name

    # update file size
    file_obj.offset = stats.st_size
    file_obj.save()

    out = {
        'zipped': True,
        'file_name': output_file_name,
        'file_size': new_file_size
    }

    # update record in mongo
    record_object = DataFile().get_by_file_id(file_id)
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field(
        "file_size")] = u.filesize_toString(file_obj.offset)
    auto_fields[DataFile().get_qualified_field("name")] = output_file_name
    auto_fields[DataFile().get_qualified_field(
        "file_location")] = new_file_name

    profile_id = request.session['profile_id']
    component = "datafile"

    BrokerDA(target_id=str(record_object.get("_id", str())),
             component=component,
             auto_fields=auto_fields).do_save_edit()

    out = jsonpickle.encode(out)
    return HttpResponse(out, content_type='json')
Example #9
def inspect_file(request):
    # utility method to examine a file and return meta-data to the frontend
    output_dict = {'file_type': 'unknown', 'do_compress': False}

    # get reference to file
    file_id = request.GET['file_id']

    chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
    file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)

    # size threshold to determine if a file should be compressed
    zip_threshold = 200000000  # size in bytes

    # check if file is compressed
    is_zipped = u.is_gzipped(file_name)

    if chunked_upload.offset >= zip_threshold and not is_zipped:
        output_dict['do_compress'] = True

    # check for file type
    if u.is_pdf_file(file_name):
        output_dict['file_type'] = 'pdf'
    else:
        try:
            if u.is_fastq_file(file_name):
                output_dict['file_type'] = 'fastq'
                if not is_zipped:
                    output_dict['do_compress'] = True
            elif u.is_sam_file(file_name):
                output_dict['file_type'] = 'sam'
                if not is_zipped:
                    output_dict['do_compress'] = True
            elif u.is_bam_file(file_name):
                output_dict['file_type'] = 'bam'
                if not is_zipped:
                    output_dict['do_compress'] = True

            else:  # make file type same as extension
                output_dict['file_type'] = chunked_upload.filename.rsplit(
                    '.')[1]
        except:
            output_dict['file_type'] = 'unknown'

    # add datafile schema
    chunked_upload.type = output_dict['file_type']
    chunked_upload.save()

    # ...and obtain the inserted record
    profile_id = request.session['profile_id']
    component = "datafile"

    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field("file_id")] = file_id
    auto_fields[DataFile().get_qualified_field(
        "file_type")] = output_dict['file_type']
    auto_fields[DataFile().get_qualified_field("file_location")] = file_name
    auto_fields[DataFile().get_qualified_field(
        "file_size")] = u.filesize_toString(chunked_upload.offset)
    auto_fields[DataFile().get_qualified_field(
        "name")] = chunked_upload.filename

    # get default type from schema
    type = [
        f for f in d_utils.get_copo_schema(component)
        if f.get("id").split(".")[-1] == "type"
    ]
    if type:
        type = type[0]["default_value"]
        auto_fields[DataFile().get_qualified_field("type")] = type

    df = BrokerDA(context=dict(),
                  profile_id=profile_id,
                  component=component,
                  auto_fields=auto_fields,
                  visualize="last_record").do_save_edit().get(
                      "record_object", dict())

    out = jsonpickle.encode(output_dict)
    return HttpResponse(out, content_type='json')
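Note that chunked_upload.filename.rsplit('.')[1] above takes the token after the first dot, so 'reads.fastq.gz' yields 'fastq' rather than 'gz'; if the last suffix is what is wanted instead, os.path.splitext is the usual tool, as in this sketch (the helper name is illustrative):

import os

def extension_fallback(filename):
    # take the last suffix, e.g. 'reads.fastq.gz' -> 'gz'
    ext = os.path.splitext(filename)[1]
    return ext.lstrip('.') if ext else 'unknown'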
Example #10
def inspect_file(request):
    # utility method to examine a file and return meta-data to the frontend
    output_dict = {'file_type': 'unknown', 'do_compress': False}

    # get reference to file
    file_id = request.GET['file_id']

    chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
    file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)

    # size threshold to determine if a file should be compressed
    zip_threshold = 200000000  # size in bytes

    # check if file is compressed
    is_zipped = u.is_gzipped(file_name)

    if chunked_upload.offset >= zip_threshold and not is_zipped:
        output_dict['do_compress'] = True

    # check for file type
    if u.is_pdf_file(file_name):
        output_dict['file_type'] = 'pdf'
    else:
        try:
            if u.is_fastq_file(file_name):
                output_dict['file_type'] = 'fastq'
                if not is_zipped:
                    output_dict['do_compress'] = True
            elif u.is_sam_file(file_name):
                output_dict['file_type'] = 'sam'
                if not is_zipped:
                    output_dict['do_compress'] = True
            elif u.is_bam_file(file_name):
                output_dict['file_type'] = 'bam'
                if not is_zipped:
                    output_dict['do_compress'] = True

            else:  # make file type same as extension
                output_dict['file_type'] = chunked_upload.filename.rsplit('.')[1]
        except:
            output_dict['file_type'] = 'unknown'

    # add datafile schema
    chunked_upload.type = output_dict['file_type']
    chunked_upload.save()

    # ...and obtain the inserted record
    profile_id = request.session['profile_id']
    component = "datafile"

    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field("file_id")] = file_id
    auto_fields[DataFile().get_qualified_field("file_type")] = output_dict['file_type']
    auto_fields[DataFile().get_qualified_field("file_location")] = file_name
    auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(chunked_upload.offset)
    auto_fields[DataFile().get_qualified_field("name")] = chunked_upload.filename

    # get default type from schema
    type = [f for f in d_utils.get_copo_schema(component) if f.get("id").split(".")[-1] == "type"]
    if type:
        type = type[0]["default_value"]
        auto_fields[DataFile().get_qualified_field("type")] = type

    df = BrokerDA(context=dict(),
                  profile_id=profile_id,
                  component=component,
                  auto_fields=auto_fields,
                  visualize="last_record"
                  ).do_save_edit().get("record_object", dict())

    # do visualise
    table_data = BrokerVisuals(
        profile_id=profile_id,
        context=output_dict,
        component=component,
        record_object=df
    ).do_row_data().get("table_data", dict())

    output_dict['table_data'] = table_data

    out = jsonpickle.encode(output_dict)
    return HttpResponse(out, content_type='json')
Example #11
def populate_samples_form(request):
    collection_id = request.GET['collection_id']
    collection_id = 1
    out = u.get_sample_html_from_collection_id(collection_id)
    return HttpResponse(out, content_type='html')