Example 1
def create_mapping_index_page(mapping_file_pathname, template):
    """Create an index html page for a mapping. e.g. http://airm.aero/developers/fixm-4.2.0-to-airm-1.0.0.html

  Keyword arguments:
    mapping_file_pathname -- string defining the location and name of the mapping e.g. data/xlsx/mapping FIXM 4.2.0.xlsx
    template -- string defining the location and name of the html template e.g. data/html/templates/concept-list-template.html
  """

    import mapping
    mapping = mapping.Mapping(mapping_file_pathname)
    mapping_metadata = mapping.metadata
    mapping_dict = mapping.dictionary

    import utils
    utils.create_directory("docs/airm/developers/" +
                           mapping_metadata["url_name"])
    soup = utils.create_html_soup(template)

    soup.title.string = (mapping_metadata["name"] +
                         " | Semantic Correspondences | AIRM.aero")
    soup.find(text="MAPPING_NAME_BC").replace_with(mapping_metadata["name"])
    soup.find(text="MAPPING_NAME_H2").replace_with(mapping_metadata["name"])

    for record in mapping_dict:
        new_table_row = create_index_table_row(record, mapping_metadata)
        soup.find('tbody').insert(1, new_table_row)

    with open("docs/airm/developers/" + mapping_metadata["url_name"] + ".html",
              "w") as f:
        f.write(soup.prettify())
Example 2
import concatenation
import mapping


def main():
    # concatenation
    concatProc = concatenation.Concatenation("concatInput.txt")
    concatProc.run_concat()
    # mapping
    mappingProc = mapping.Mapping("mappingInput.txt")
    mappingProc.run_mapping()
Example 3
import concatenation
import mapping


def main():
    # concatenation
    concat = input("Are you going to run concatenation? (y/n) ")
    ran_concatenation = (concat == 'y')
    if ran_concatenation:
        concatProc = concatenation.Concatenation("concatInput.txt")
        concatProc.run_concat()
    # mapping
    mappingProc = mapping.Mapping("mappingInput.txt", ran_concatenation)
    mappingProc.run_mapping()
Example 4
def add_mapping_to_connected_index(connected_index, mapping_file_pathname):
  import mapping
  mapping = mapping.Mapping(mapping_file_pathname)
  mapping_dict = mapping.dataframe.to_dict('records')
  mapping_metadata = mapping.metadata
  model_name = str(mapping_metadata["name"]).replace(" to AIRM 1.0.0", "")
  path = mapping_metadata["url_name"]

  for entry in mapping_dict:
    sem_correspondences = str(entry['AIRM Concept Identifier']).split('\n')
    for urn in sem_correspondences:
      if str(entry["Data Concept"]) == "missing data":
        concept = str(entry["Information Concept"])
        target = str(entry["Information Concept"]) + ".html"
      else:
        concept = str(entry["Data Concept"])
        target = (str(entry["Information Concept"]) + ".html#" +
                  str(entry["Data Concept"]))

      connected_index.append({
          "airm_urn": urn,
          "model_name": model_name,
          "model_path": path,
          "concept_name": concept,
          "concept_target": target
      })

  return connected_index
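A hedged sketch of how this helper might be driven; the workbook path is illustrative (borrowed from Example 1's docstring), and the index starts empty:

# Hypothetical aggregation; call once per mapping workbook.
connected_index = []
connected_index = add_mapping_to_connected_index(
    connected_index, "data/xlsx/mapping FIXM 4.2.0.xlsx")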
Example 5
#!/usr/bin/python3

import sbemdb
import mapping
import confidence
import trials

import numpy as np
import matplotlib.pyplot as plt 

############ GENERAL PREPARATION #####################################
# Connect to the tracing database
db = sbemdb.SBEMDB()

# Create a mapping object for converting between various forms of neuron ID
mp = mapping.Mapping()

############ VSD/EPHYS EXAMPLE #######################################
# Load electrophysiology/VSD trial #9 (an example of local bending)
tr = trials.Trial(9)

# Extract timing of electrophysiological stimulus
stm = tr.stimuli()['P_VL'] 
tt, t_unit = stm.timestamps()
ii, i_unit = stm.trace()

# Plot the stimulus
plt.interactive(True)
plt.figure()
plt.plot(tt, ii)
plt.xlabel(f'Time ({t_unit})')
Example 6
# from sys import exit
# from random import randint
# from textwrap import dedent
# starts the game
import engine
import mapping

game_map = mapping.Mapping('intro')  # avoid shadowing the built-in map()
game = engine.Engine(game_map)
game.play()

Example 7
def create_mapping_item_pages(mapping_file_pathname, template):
    """Create an html page for each information concept in a mapping. e.g. http://airm.aero/developers/fixm-4.2.0-to-airm-1.0.0/AerodromeSurfaceWind.html

  Keyword arguments:
    mapping_file_pathname -- string defining the location and name of the mapping e.g. data/xlsx/mapping FIXM 4.2.0.xlsx
    template -- string defining the location and name of the html template e.g. data/html/templates/concept-list-template.html
  """
    import mapping
    mapping = mapping.Mapping(mapping_file_pathname)
    mapping_metadata = mapping.metadata

    information_concepts = mapping.get_information_concepts()

    import utils
    for info_concept in information_concepts:
        print(info_concept["Information Concept"])
        soup = utils.create_html_soup(template)

        soup.title.string = (str(info_concept['Information Concept']) + " - " +
                             mapping_metadata["name"] + " | AIRM.aero")
        soup.find(text="MAPPING_NAME_BC").replace_with(
            str(mapping_metadata["name"]))
        soup.find(id="MAPPING_NAME_BC")["href"] = (
            "../" + mapping_metadata["url_name"] + ".html")
        soup.find(text="INFO_CONCEPT_NAME_BC").replace_with(
            str(info_concept['Information Concept']))
        h2 = soup.new_tag("h2")
        h2.string = str(info_concept['Information Concept'])
        soup.find(id="INFO_CONCEPT_NAME").insert(0, h2)

        code = soup.new_tag("code")
        code.string = info_concept['Concept Identifier']
        code["class"] = "text-secondary"
        soup.find(id="INFO_CONCEPT_NAME").insert(1, code)

        soup.find(text="INFO_CONCEPT_DEFINITION").replace_with(
            str(info_concept['Concept Definition']))

        data_concepts = mapping.get_data_concepts(
            info_concept['Information Concept'])
        insert_position = 0
        for data_concept in data_concepts:
            if data_concept["Data Concept"] != 'missing data':
                new_table_row = create_properties_table_row(data_concept)
                soup.find(id="DATA_CONCEPTS_LIST").insert(
                    insert_position, new_table_row)
            insert_position += 1

        insert_position = 0
        soup.find(id="DATA_CONCEPTS_DETAIL").insert(insert_position,
                                                    soup.new_tag("hr"))
        insert_position += 1
        h3 = soup.new_tag("h3")
        h3["style"] = "text-align:center; color:grey; margin-top: 50px; margin-bottom: 20px;"
        h3.string = "Details"
        soup.find(id="DATA_CONCEPTS_DETAIL").insert(insert_position, h3)
        insert_position += 1

        new_div = create_class_detail_div(info_concept)
        soup.find(id="DATA_CONCEPTS_DETAIL").insert(insert_position, new_div)
        insert_position += 1

        for data_concept in data_concepts:
            if data_concept["Data Concept"] != 'missing data':
                new_div = create_property_detail_div(data_concept)
                soup.find(id="DATA_CONCEPTS_DETAIL").insert(
                    insert_position, new_div)
            insert_position += 1

        with open(
                "docs/airm/developers/" + mapping_metadata["url_name"] + "/" +
                info_concept["Information Concept"] + ".html", "w") as f:
            f.write(soup.prettify())
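The loop above reads a handful of keys from each information-concept record; here is a minimal illustrative record, with invented values, showing the shape the code assumes:

# Hypothetical record; keys inferred from the accesses above, values invented.
info_concept = {
    "Information Concept": "AerodromeSurfaceWind",
    "Concept Identifier": "urn:example:airm:AerodromeSurfaceWind",
    "Concept Definition": "Wind conditions observed at the aerodrome surface.",
}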
Example 8
import os
import time

from django import http
from django.core.exceptions import ValidationError
from django.utils.encoding import smart_str

import importCSV
import mapping
# FileValidator is a project-specific helper; its import is not shown in the source.


def updateDatabaseCSV(request):
    if request.user.is_authenticated:
        if request.method == "POST":

            _file = request.FILES['file']
            _dest = './temp/{}_{}'.format(request.user, int(time.time()))

            # Preliminary check on document
            validate_file = FileValidator(
                max_size=52428800,
                content_types=('text/plain',
                               'text/csv'))  # 52428800 B = 50 MiB
            try:
                validate_file(_file)
            except ValidationError as e:
                return http.HttpResponse(status=500, content=e)

            if not os.path.exists('./temp'):
                try:
                    os.makedirs('./temp')
                except OSError as e:
                    return http.HttpResponse(status=500, content=e)

            try:
                with open(_dest + ".csv", 'wb+') as destination:
                    for chunk in _file.chunks():
                        destination.write(chunk)
            # https://stackoverflow.com/a/4992124/3211506 (Does not catch KeyboardInterrupt, etc.)
            except Exception as e:
                # Remove file if exists
                if os.path.isfile(_dest + ".csv"):
                    os.remove(_dest + ".csv")
                return http.HttpResponse(status=500, content=e)

            # Import the CSV
            # delete = True, and no log files since we don't want to clog up the server
            di = importCSV.DatabaseImporter(_dest + ".csv",
                                            request.user.username, True, None)
            try:
                di.parse()
            except (importCSV.ImporterError, AssertionError) as e:
                di.clean()
                return http.HttpResponse(status=500, content=e)
            except Exception as e:
                di.clean()
                return http.HttpResponse(
                    status=500,
                    content="Unknown Parsing/Update Error: {}".format(e))

            di.clean()
            return http.HttpResponse(content="OK")
        else:
            # If not POST, we generate a template for the csv
            m = mapping.Mapping()
            _csvT = m.generateTemplate()

            # https://stackoverflow.com/a/1158750/3211506
            _res = http.HttpResponse(content=_csvT, content_type='text/csv')
            _res['Content-Disposition'] = 'attachment; filename={}'.format(
                smart_str("template.csv"))
            return _res
    else:
        return http.HttpResponseForbidden(content="Forbidden; Please Login")
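A minimal sketch of how a view like this might be routed, assuming standard Django URL configuration; the route, module layout, and name below are assumptions, not from the source:

# urls.py (illustrative wiring only)
from django.urls import path

from . import views

urlpatterns = [
    path('database/csv/', views.updateDatabaseCSV, name='update-database-csv'),
]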
Example 9
import os

import gatk_pre_processing
import helpers
import mapping
import pre_processing
import qc_trim


def callmapping(var_maptype,
                var_sampletype,
                working_directory,
                library,
                threads,
                var_gatk_tools,
                issplitchr,
                trim,
                middle_files="Yes"):
    mt = var_maptype
    if middle_files == "Yes":
        mdf_keep = True
    else:
        mdf_keep = False
    st = var_sampletype
    wd = working_directory
    if wd[-1] == "/" or wd[-1] == "\\":
        wd = wd[:-1]
    lb = library
    th = threads
    gt = var_gatk_tools
    sc = issplitchr
    tr = trim
    os.chdir(wd)

    fastq_list = helpers.get_fastq()
    info_dict = helpers.get_info(st, fastq_list)

    if tr == "Yes":
        if not os.path.exists(wd + "/QC"):
            qc = qc_trim.QC(wd, st, th, fastq_list, info_dict, mt)
            qc.run_qc()
    else:
        if os.path.exists(wd + "/QC"):
            tr = "Yes"

    mapping_step = mapping.Mapping(working_directory=wd,
                                   map_type=mt,
                                   sample_type=st,
                                   library_matching_id=lb,
                                   thrds=th,
                                   trim=tr)

    mapping_files = mapping_step.mapping()
    #mapping_files = ["SortedBAM_Bwa_NOB01_AACGTGA_L001_001.bam"]

    if not mdf_keep:
        helpers.delete_files_from_folder(wd, mt, "Mapping", mapping_files)
    print("---------------------------")
    print(mapping_files)
    pre_processing_step = pre_processing.PreProcessing(working_directory=wd,
                                                       map_type=mt,
                                                       sample_type=st,
                                                       library_matching_id=lb,
                                                       thrds=th,
                                                       issplitchr=sc)

    print("---------------------------")
    print(fastq_list)
    print(info_dict)
    gatk_file_list = []
    if gt == "Yes":
        if issplitchr != "No":
            mark_duplicate_file = pre_processing_step.pre_process(
                info_dict, mapping_files)
            for file in mark_duplicate_file:
                gatk_pre_processing_step = gatk_pre_processing.GatkPreProcessing(
                    working_directory=wd,
                    map_type=mt,
                    sample_type=st,
                    library_matching_id=lb,
                    thrds=th)
                return_files = gatk_pre_processing_step.run_gatks4(file)
                print(return_files)
                gatk_file_list.append(return_files)
                print(gatk_file_list)

        else:
            mark_duplicate_file = pre_processing_step.pre_process(
                info_dict, mapping_files)
            gatk_pre_processing_step = gatk_pre_processing.GatkPreProcessing(
                working_directory=wd,
                map_type=mt,
                sample_type=st,
                library_matching_id=lb,
                thrds=th)
            gatk_files = gatk_pre_processing_step.run_gatks4(
                mark_duplicate_file)

            if not mdf_keep:
                helpers.delete_files_from_folder(wd, mt, "PreProcess",
                                                 gatk_files)
    else:
        mark_duplicate_file = pre_processing_step.pre_process(
            info_dict, mapping_files)
        if not mdf_keep:
            helpers.delete_files_from_folder(wd, mt, "PreProcess",
                                             mark_duplicate_file)

    return True
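An illustrative call, assuming the "Yes"/"No" string flags the function compares against; every argument value below is a placeholder, not taken from the source:

# Hypothetical invocation; values are placeholders.
callmapping(var_maptype="Bwa",
            var_sampletype="Germline",
            working_directory="/data/run1/",
            library="lib1",
            threads=4,
            var_gatk_tools="Yes",
            issplitchr="No",
            trim="No",
            middle_files="No")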