Example #1
File: handler.py  Project: ike01/clood
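The handler excerpts below omit the module-level imports and project helpers they rely on. A minimal sketch of the imports, assuming only the standard library and the official elasticsearch client; getESConn, utility, retrieve, project, projects_db and headers are helpers/constants defined elsewhere in the clood project and are not reproduced here:

import json
import hashlib
from collections import OrderedDict

from elasticsearch import helpers  # used by save_case_list for bulk indexing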
def cbr_retain(event, context=None):
  """
  End-point: Completes the Retain step of the CBR cycle.
  Note: If the new case has the id of an existing case, it will replace the existing entry.
  """
  result = {}
  # retain logic here
  statusCode = 201
  params = json.loads(event['body'])  # parameters in request body
  proj = params.get('project')
  es = getESConn()
  if proj is None:
    projId = params.get('projectId')  # name of casebase
    proj = utility.getByUniqueField(es, projects_db, "casebase", projId)
  new_case = params['data']
  new_case = retrieve.add_vector_fields(proj['attributes'], new_case)  # add vectors to Semantic USE fields
  # hash of the key-ordered case dict, used below to detect duplicate cases
  new_case['hash__'] = str(hashlib.md5(
      json.dumps(OrderedDict(sorted(new_case.items()))).encode('utf-8')).digest())

  if not proj['retainDuplicateCases'] and utility.indexHasDocWithFieldVal(
      es, index=proj['casebase'], field='hash__', value=new_case['hash__']):
    result = "The case already exists in the casebase"
    statusCode = 400
  else:
    result = es.index(index=proj['casebase'], body=new_case)

  response = {
    "statusCode": statusCode,
    "headers": headers,
    "body": json.dumps(result)
  }
  return response
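A minimal usage sketch for cbr_retain, assuming an AWS Lambda / API Gateway style event; the casebase name and case attribute names are hypothetical, not taken from the project:

example_event = {
  'body': json.dumps({
    'projectId': 'my_casebase',  # hypothetical casebase name; alternatively pass the full 'project' dict
    'data': {'Title': 'Some case', 'Year': 2021}  # hypothetical case attributes
  })
}
resp = cbr_retain(example_event)  # 201 on success, 400 if duplicates are disallowed and the case already exists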
Example #2
def save_case_list(event, context=None):
    """
  End-point: Saves list of case instances
  Creates index for the casebase if one does not exist
  """
    doc_list = json.loads(event['body'])  # parameters in request body
    es = getESConn()
    pid = event['pathParameters']['id']
    proj = utility.getByUniqueField(es, projects_db, "_id", pid)  # project
    # create index with mapping if it does not exist already
    project.indexMapping(es, proj)

    # Add documents to created index
    # print("Adding a hash field to each case for duplicate-checking")
    for x in doc_list:  # generate a hash after ordering dict by key
        x = retrieve.add_vector_fields(proj['attributes'],
                                       x)  # add vectors to Semantic USE fields
        x = retrieve.add_lowercase_fields(
            proj['attributes'],
            x)  # use lowercase values for EqualIgnoreCase fields
        x['hash__'] = str(
            hashlib.md5(
                json.dumps(OrderedDict(sorted(x.items()))).encode('utf-8')).
            digest())  # case hash for easy detection of duplicates
    # print("Attempting to index the list of docs using helpers.bulk()")
    resp = helpers.bulk(es, doc_list, index=proj['casebase'], doc_type="_doc")

    # Indicate that the project now has a casebase
    proj['hasCasebase'] = True
    source_to_update = {'doc': proj}
    res = es.update(index=projects_db, id=pid, body=source_to_update)

    # Create the ontology similarity measures if specified as part of the project attributes
    # (can be a lengthy operation for mid-size to large ontologies!)
    for attrib in proj['attributes']:
        if (attrib['type'] == "Ontology Concept"
                and attrib.get('similarityType') is not None
                and attrib.get('options') is not None
                and retrieve.checkOntoSimilarity(attrib['options'].get('id'))['statusCode'] != 200):
            sim_method = 'san' if attrib['similarityType'] == 'Feature-based' else 'wup'
            retrieve.setOntoSimilarity(
                attrib['options'].get('id'),
                attrib['options'].get('sources'),
                relation_type=attrib['options'].get('relation_type'),
                root_node=attrib['options'].get('root'),
                similarity_method=sim_method)

    response = {
        "statusCode": 201,
        "headers": headers,
        "body": json.dumps(resp)
    }
    return response
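A corresponding usage sketch for save_case_list; the project _id and case fields are again hypothetical:

example_event = {
    'pathParameters': {'id': 'zz999abc'},  # hypothetical project _id
    'body': json.dumps([
        {'Title': 'Case 1', 'Year': 2020},
        {'Title': 'Case 2', 'Year': 2021}
    ])
}
resp = save_case_list(example_event)  # returns a 201 response wrapping the bulk-index result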