Example 1
    spreadname = 'Import Test Sheet'

    rowsize = 2
    colsize = 52

    # load object  SHOULD HANDLE ERRORS GRACEFULLY
    print('Opening ' + data_file)
    object_list = ReadJSON(data_file)
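
A hedged sketch of the "handle errors gracefully" note above, assuming ReadJSON simply wraps json.load, so a missing file raises IOError and malformed JSON raises ValueError (the helper's real behaviour is not shown in this fragment):

    import sys

    try:
        object_list = ReadJSON(data_file)
    except IOError as err:
        print('Could not open ' + data_file + ': ' + str(err))
        sys.exit(1)
    except ValueError as err:
        print(data_file + ' does not contain valid JSON: ' + str(err))
        sys.exit(1)
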
    # find out what types of objects you have. use as worksheet names.
    # also, change all json object values to strings
    typelist = []
    for json_object in object_list:
        json_object = FlatJSON(json_object, keys)
        typetemp = str(json_object['@type'][0])
        typelist.append(typetemp)
        for name, value in json_object.items():
            if type(value) is list:
                if value == []:
                    json_object[name] = ''
                elif type(value[0]) is dict:
                    json_object[name] = str(value)
                else:
                    json_object[name] = ', '.join(value)
            elif type(value) is int or type(value) is float:
                json_object[name] = str(value)

    typelist = list(set(typelist))
    typelist.sort()

    # get column headers based on schema fields
    # get all relevant schema
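
The code that actually retrieves the schemas is not part of this fragment. A sketch of one way to turn them into column headers, assuming the portal publishes its JSON schemas under /profiles/ (as the public ENCODE REST API does) and that server holds the portal's base URL; requests and the headers_by_type name are illustrative choices, not the helpers this script uses:

    import requests

    # the /profiles/ listing is assumed to be keyed by item type (e.g. 'Experiment')
    schemas = requests.get(server.rstrip('/') + '/profiles/',
                           headers={'Accept': 'application/json'}).json()

    headers_by_type = {}
    for object_type in typelist:
        # each schema's 'properties' keys become the spreadsheet column headers
        properties = schemas.get(object_type, {}).get('properties', {})
        headers_by_type[object_type] = sorted(properties.keys())
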
        # if no existing object is found, post the new object(s).  SHOULD HANDLE ERRORS GRACEFULLY
        if not old_object:
            response = new_ENCODE(object_collection, new_object, keys)

        # if object is found, check for differences and patch it if needed.
        else:

            # compare the new object to the old one: drop fields whose values are identical and fields the existing object has no value for.  SHOULD INFORM OF THIS OPERATION, BUT NOT NEEDED WHEN SINGLE PATCH CODE EXISTS.
            for key in list(new_object.keys()):
                if new_object.get(key) == old_object.get(key):
                    new_object.pop(key)
                elif not old_object.get(key):
                    new_object.pop(key)

            # if there are any different fields, patch them.  SHOULD ALLOW FOR USER TO VIEW/APPROVE DIFFERENCES (see the sketch below)
            if new_object:
                
                # inform user of the updates
                print(object_id + ' has updates.')
                #print(new_object)
                
                # patch each field to object individually
                for key, value in new_object.items():
                    patch_single = {}
                    patch_single[key] = value
                    print(patch_single)
                    response = patch_ENCODE(object_id, patch_single, keys)

            # inform user there are no updates            
            else:
                print(object_id + ' has no updates.')
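
The loop above applies every patch as soon as it finds a difference. A sketch of the view/approve step the TODO asks for, shown as a replacement for the unconditional patch loop (assumes an interactive Python 2 run, hence raw_input; use input() on Python 3):

    if new_object:
        print(object_id + ' has updates:')
        for key, value in new_object.items():
            print('    ' + key + ': ' + str(value))
        # nothing is sent to the server until the user confirms
        answer = raw_input('Apply these changes to ' + object_id + '? [y/N] ')
        if answer.strip().lower() == 'y':
            # patch each field individually, as above
            for key, value in new_object.items():
                response = patch_ENCODE(object_id, {key: value}, keys)
        else:
            print('Skipped ' + object_id + '.')
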


Example 3
    # search criteria
    search_value_string = 'GM12893'
    search_key_string = 'biosample_term_name'

    # file name for saved objects
    find_file = 'find.json'

    
    # retrieve the relevant objects
    master_objects = ElasticSearchJSON(server, query, object_type, hitnum)

    # flatten objects and select based on search criteria
    object_list = []
    for master_object in master_objects:
        master_object = FlatJSON(master_object, keys)
        selected = False
        for key, value in master_object.items():
            if search_key_string in str(key):
                #print(key)
                if type(value) is unicode:
                    if search_value_string in str(value):
                        print('Object ' + master_object[u'accession'] + ' Selected.  ' + str(key) + ' - ' + value)
                        object_list.append(master_object)
                        selected = True
                elif type(value) is list:
                    for entry in value:
                        if search_value_string in str(entry):
                            print('Object ' + master_object[u'accession'] + ' Selected.  ' + str(key) + ' - ' + entry)
                            object_list.append(master_object)
                            selected = True
                            break
            # stop checking further keys once this object has been selected
            if selected:
                break

    print(str(len(object_list)) + ' of ' + str(len(master_objects)) + ' Objects Found.  Saved to ' + find_file + '.')
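
The message above reports that the selected objects were saved to find.json, but the write itself is not shown in this fragment. With only the standard library (the original script may use its own helper instead), the save could look like:

    import json

    with open(find_file, 'w') as out_handle:
        json.dump(object_list, out_handle, indent=4, sort_keys=True)
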
        # if no existing object is found, post the new object(s).  SHOULD HANDLE ERRORS GRACEFULLY (see the sketch at the end of this example)
        if not old_object:
            response = new_ENCODE(object_collection, new_object, keys)

        # if object is found, check for differences and patch it if needed.
        else:

            # compare the new object to the old one: drop fields whose values are identical and fields the existing object has no value for.  SHOULD INFORM OF THIS OPERATION, BUT NOT NEEDED WHEN SINGLE PATCH CODE EXISTS.
            for key in list(new_object.keys()):
                if new_object.get(key) == old_object.get(key):
                    new_object.pop(key)
                elif not old_object.get(key):
                    new_object.pop(key)

            # if there are any different fields, patch them.  SHOULD ALLOW FOR USER TO VIEW/APPROVE DIFFERENCES
            if new_object:

                # inform user of the updates
                print(object_id + ' has updates.')
                #print(new_object)

                # patch each field to object individually
                for key, value in new_object.items():
                    patch_single = {}
                    patch_single[key] = value
                    print(patch_single)
                    response = patch_ENCODE(object_id, patch_single, keys)

            # inform user there are no updates
            else:
                print(object_id + ' has no updates.')
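
The POST above is flagged with "SHOULD HANDLE ERRORS GRACEFULLY", and the patch loop would benefit from the same check. A sketch of inspecting the outcome, assuming new_ENCODE and patch_ENCODE return the server's decoded JSON reply and that a successful reply reports 'status': 'success' (an assumption about the response shape, not something these fragments show):

    # inside the patch loop above, each call could be checked before moving on
    response = patch_ENCODE(object_id, patch_single, keys)
    if not response or response.get('status') != 'success':
        # the 'status' field is an assumed part of the reply; adjust to whatever the helpers return
        print('PATCH of ' + object_id + ' failed: ' + str(response))
    else:
        print('PATCH of ' + object_id + ' succeeded.')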