Example #1
from requests.exceptions import Timeout

from labkey.utils import create_server_context
from labkey.query import select_rows
"""
Get user security context
${rLabkeySessionId} 
"""
server = '${baseServerURL}'

#sessionID = '${httpSessionId}'
#apikey = 'session|'+sessionID
apikey = 'apikey|YOURKEYHERE'

#folder = "${containerPath}"
folder = 'project/subfolder'  # Project folder path

schema = 'assay.General.AssayName'

#table = 'Batches'
#table = 'Runs'
table = 'Data'

# TODO: Is ssl really necessary for internal script?
server_context = create_server_context(server, folder, api_key=apikey)

result = select_rows(server_context, schema, table)
rows = result['rows']

# write out results to verify function
# NOTE: 'sr' (the run-properties object) is defined elsewhere in the transform script
with open(sr.filePathRunPropertiesOut, 'a') as f:
    for r in rows:
        f.write(str(r) + '\n')  # each row is a dict; stringify before writing
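
# The commented-out lines above hint at session-based authentication. A minimal
# sketch of that variant, assuming the session id comes from the
# ${httpSessionId} substitution shown in the template:
sessionID = '${httpSessionId}'
session_apikey = 'session|' + sessionID
session_context = create_server_context(server, folder, api_key=session_apikey)
session_rows = select_rows(session_context, schema, table)['rows']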
Example #2
from labkey.utils import create_server_context
from labkey.query import select_rows
from labkey.exceptions import RequestError

labkey_server = 'localhost:8080'  # test server; the original snippet omits this line
project_name = 'ModuleAssayTest'  # Project folder name
context_path = 'labkey'
server_context = create_server_context(labkey_server,
                                       project_name,
                                       context_path,
                                       use_ssl=False)

schema = 'lists'
table = 'Demographics'
column1 = 'Group Assignment'
column2 = 'Participant ID'

###################
# Test basic select_rows
###################
result = select_rows(server_context, schema, table)
if result is not None:
    print(result['rows'][0])
    print("select_rows: There are " + str(result['rowCount']) + " rows.")
else:
    print('select_rows: Failed to load results from ' + schema + '.' + table)

###################
# Test error handling
###################
# catch base error
try:
    result = select_rows(server_context, schema, 'badtable')
    print(result)
except RequestError:
    print('Caught base error')
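
# A follow-on sketch: the narrower QueryNotFoundError (used in the later
# examples here) can be caught first, so a missing table is distinguished
# from other request failures.
from labkey.exceptions import QueryNotFoundError

try:
    result = select_rows(server_context, schema, 'badtable')
    print(result)
except QueryNotFoundError:
    print('Caught bad query')
except RequestError:
    print('Caught base error')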
Example #3
import pandas as pd

from labkey.utils import create_server_context
from labkey.query import select_rows
from labkey.exceptions import QueryNotFoundError

# NOTE: 'server' and 'apikey' are assumed to be defined as in Example #1.
try:
    """
    Browse to the grid view of the list or query in LabKey,
    and then click <Export>, select the <Script> tab, then select
    the <Python> radio button and <Create Script>. Copy the
    schema_name to 'schema' and query_name to 'table' below.
    The 'folder' path is automatically updated here, though
    if needed, replace below with the second arg in the 
    labkey.utils.create_server_context() call from the created script text.
    """
    folder = "${containerPath}"
    schema = 'lists'
    table = 'Analytes'
    server_context = create_server_context(server,
                                           folder,
                                           context_path='labkey',
                                           api_key=apikey)
    result = select_rows(server_context=server_context,
                         schema_name=schema,
                         query_name=table,
                         timeout=10)
    atable = pd.DataFrame(result['rows'])
    analytes = set(atable['analyte'])
    units_preset = atable.set_index('analyte')['unit'].to_dict()
    """
    Optionally include this portion if reading a file on the
    server instead of performing an API query.
    """
#    #query for local file
#    atablePath = '...analytes.tsv'
#    atable = pd.read_csv(atablePath, sep='\t')
#    analytes = set(atable['analyte'].to_list())
#    units_preset = atable.set_index('analyte')['unit'].to_dict()
#except FileNotFoundError:
except QueryNotFoundError:
    # The original snippet is truncated here; handle a missing query as needed.
    pass
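
# Because select_rows above passes timeout=10, a slow server can surface a
# requests-level timeout. A minimal sketch of guarding against that, assuming
# the same context and query as above:
from requests.exceptions import Timeout

try:
    result = select_rows(server_context=server_context,
                         schema_name=schema,
                         query_name=table,
                         timeout=10)
except Timeout:
    print('Query timed out after 10 seconds; retry or raise the timeout.')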
Example #4
import os
import json
import ast

from labkey.utils import create_server_context
from labkey.query import select_rows
from labkey.exceptions import QueryNotFoundError

# Helper functions (get_options, createDirectory, removeUnnecessaryColumns,
# convertUnicodeToASCII, removeTrailingSpacesInValues, changeDateTimeFormat,
# convertKeyToUpperCase, renameSpecificColumns, rearrangeColumns,
# createMetadata) and the datasetsWithMetadata list are defined elsewhere
# in the original script.
def main():
    args = get_options()
    dictionaryFromFile = []

    with open('data/dictionary.txt', 'r') as inf:
        for line in inf:
            # literal_eval is safer than eval for parsing serialized dicts
            dictionaryFromFile.append(ast.literal_eval(line))

    labkeyDictionary = dictionaryFromFile[0]

    labkeyServer = 'labkey.uhnresearch.ca'
    contextPath = 'labkey'
    schema = 'study'

    projectName = args.project.lower()

    if projectName not in labkeyDictionary:
        print('Caught bad project name. Please pass in a project name that is on labkey.')
        exit()
    projectDatasets = labkeyDictionary[projectName]

    outputFolder = "results"

    if not os.path.exists(outputFolder):
        os.makedirs(outputFolder)

    print("Create a server context")
    serverContext = create_server_context(labkeyServer, projectName, contextPath, use_ssl=True)

    if args.json:
        print("JSON output required.")
        outputFile = os.path.join(outputFolder, projectName.split("/")[-1].lower() + ".json")
        file = open(outputFile, "w")
        print("Created a " + outputFile + " file.")

    datasets = {}  # renamed from 'dict' to avoid shadowing the builtin

    if (args.cbio):
        print("Cbio output required.")
        outputCbioDir = args.project.lower()
        createDirectory(outputFolder + "/" + outputCbioDir)

    for table in projectDatasets:
        try:
            result = select_rows(serverContext, schema, table)

            if result is not None:
                row_to_add = result["rows"]

                if (args.cbio):
                    newFile = open(outputFolder + "/" + outputCbioDir + "/" + table + ".txt", "w")
                    header = True

                for idx in range(len(row_to_add)):

                    row_to_add[idx] = removeUnnecessaryColumns(row_to_add[idx])
                    row_to_add[idx] = convertUnicodeToASCII(row_to_add[idx])
                    row_to_add[idx] = removeTrailingSpacesInValues(row_to_add[idx])
                    row_to_add[idx] = changeDateTimeFormat(row_to_add[idx])
                    row_to_add[idx] = convertKeyToUpperCase(row_to_add[idx])
                    row_to_add[idx] = renameSpecificColumns(row_to_add[idx], table)

                    if (args.cbio):

                        rowDict = row_to_add[idx]
                        rowDictHeader = ""
                        rowDictHeaderValues = ""
                        rowDictHeaderValuesList = []
                        rowDictvalues = ""

                        if (header):
                            print(rowDict)
                            for key in rowDict.keys():
                                print(key)
                                rowDictHeader += key + "\t"
                                # accumulate the first row's values alongside the header
                                rowDictHeaderValues += str(rowDict[key]) + "\t"

                            # strip() returns a new string; reassign to take effect
                            rowDictHeader = rowDictHeader.strip()
                            if table.lower() in datasetsWithMetadata:
                                rowDictHeaderValuesList = rowDictHeaderValues.split("\t")

                                metadataList = rowDictHeader.split("\t")
                                patientIdIdx = metadataList.index("PATIENT_ID")
                                metadataStr = rearrangeColumns(metadataList, patientIdIdx)
                                print(rowDictHeaderValuesList)
                                newRowDictHeaderValuesList = rearrangeColumns(rowDictHeaderValuesList, patientIdIdx)
                                print(newRowDictHeaderValuesList)

                                newFile.write(createMetadata(metadataStr, rowDictHeaderValuesList) + "\n")
                            headerList = rowDictHeader.split("\t")

                            patientIdIdx = headerList.index("PATIENT_ID")
                            rowDictHeader = rearrangeColumns(headerList, patientIdIdx)

                            newFile.write(rowDictHeader + "\n")
                            header = False

                        for key in rowDict.keys():
                            # str() covers both string and non-string values
                            rowDictvalues += str(rowDict[key]) + "\t"
                        rowDictvalues = rowDictvalues.strip()


                        rowDictvaluesList = rowDictvalues.split("\t")
                        # rearrange the values to match the header, which moved
                        # PATIENT_ID to the front above
                        rowDictvalues = rearrangeColumns(rowDictvaluesList, patientIdIdx)
                        newFile.write(rowDictvalues + "\n")

                if (args.cbio):
                    newFile.close()

                datasets[table] = row_to_add

                print("From the dataset " + table + ", the number of rows returned: " + str(result['rowCount']))
            else:
                print('select_rows: Failed to load results from ' + schema + '.' + table)
        except QueryNotFoundError:
            print('Error: The table ' + table + " was not found.")

    if args.json:
        file.write(json.dumps(datasets, indent=4, sort_keys=True))
        file.close()
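
# For very large datasets, the export loop above can fetch in pages instead of
# one call per table. A minimal sketch, assuming the max_rows/offset parameters
# of select_rows and the same serverContext as above:
def fetch_all_rows(server_context, schema, table, page_size=1000):
    rows = []
    offset = 0
    while True:
        result = select_rows(server_context, schema, table,
                             max_rows=page_size, offset=offset)
        page = result['rows']
        rows.extend(page)
        if len(page) < page_size:  # last (partial or empty) page
            break
        offset += page_size
    return rows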
Example #5
# NOTE: this snippet is truncated; the block below sits inside a loop over the
# REDCap data dictionary ('dd'), mapping coded field values onto form frames.
                              concatcbs = rcdat[checkedlistcols].apply(lambda x: ';'.join(x[~x.isna()]), axis=1)
                              rcforms[dd['form_name'].loc[field]][field] = concatcbs
                          # If it isn't a checkbox, convert all codes to their values
                          else:
                              rcdat[field].replace(optmap, inplace=True)
                  if dd['field_type'].loc[field] == 'yesno':
                      rcforms[dd['form_name'].loc[field]][field] = rcdat[field].map(ynmap)
                  elif not isCheckbox:
                      rcforms[dd['form_name'].loc[field]][field] = rcdat[field]
              else:
                  # print('that field isn\'t in the data dump')
                  pass

### Assign LabKey SubjectIDs
# Get the next available SubjectID
nextIDqresult = select_rows(server_context, 'study', 'GetNextSubjectID')
nextSubjID = nextIDqresult['rows'][0]['nextSubjID']
if not nextSubjID:
    nextSubjID = 0

# Initialize fields that will be used in ID assignment
# avoid inplace replace on a .loc slice, which may not write through to the frame
rcsubjdat['labkey_subjid'] = rcsubjdat['labkey_subjid'].replace('', np.nan)

# For each REDCap ID, match it to its existing Subject ID & F number
# If there isn't a corresponding Subject ID, assign one
# Then check if this individual should have an F#, and assign one
# if they don't have one already. F# is determined using the referral
# date when available, but falls back to the current date if it isn't.
anybodynew = False
for rcid in rcsubjdat.index:
    if pd.notnull(rcsubjdat.loc[rcid, 'labkey_subjid']):
        pass  # already has a SubjectID; the original snippet is truncated here
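
# A hedged sketch of the assignment branch the truncated loop implies: give any
# unassigned subject the next sequential ID and flag that new IDs were issued.
# (Field names follow the snippet above; the original logic is not fully shown.)
for rcid in rcsubjdat.index:
    if pd.isnull(rcsubjdat.loc[rcid, 'labkey_subjid']):
        rcsubjdat.loc[rcid, 'labkey_subjid'] = nextSubjID
        nextSubjID += 1
        anybodynew = True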
Example #6
from labkey.utils import create_server_context
from labkey.query import select_rows, QueryFilter

print("Create a server context")
server_context = create_server_context('idri.labkey.com', 'Formulations')

ssr = select_rows(server_context, 'Samples', 'Formulations')
print("select_rows: There are " + str(len(ssr['rows'])) + " rows.")

ssr = select_rows(server_context, 'Samples', 'Formulations', filter_array=[
    QueryFilter('Batch', 'QF', filter_type=QueryFilter.Types.CONTAINS)
])
print("select_rows: There are " + str(len(ssr['rows'])) + " filtered rows.")

# A separate snippet from the same source: wrap the next-ID query from
# Example #5 in a reusable helper.
def getNextLabKeyID(server_context):
    nextIDqresult = select_rows(server_context, 'study', 'GetNextSubjectID')
    nextSubjID = nextIDqresult['rows'][0]['nextSubjID']
    if not nextSubjID:
        nextSubjID = 0
    return nextSubjID
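
# Example usage of the helper above, reusing a study-schema server context:
next_id = getNextLabKeyID(server_context)
print("Next available SubjectID: " + str(next_id))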
print("Create a server context")
labkey_server = 'localhost:8080'
project_name = 'moduleAssayTest'  # Project folder name
contextPath = 'labkey'
server_context = create_server_context(labkey_server, project_name, contextPath, use_ssl=False)

schema = 'lists'
table = 'Demographics'
column1 = 'Group Assignment'
column2 = 'Participant ID'


###################
# Test basic select_rows
###################
result = select_rows(server_context, schema, table)
if result is not None:
    print(result['rows'][0])
    print("select_rows: There are " + str(result['rowCount']) + " rows.")
else:
    print('select_rows: Failed to load results from ' + schema + '.' + table)


###################
# Test error handling
###################
# catch base error
try:
    result = select_rows(server_context, schema, 'badtable')
    print(result)
except RequestError: