Example #1
def getElementsByAsin(asinKeys):
    result = es.search(
        index="laptops",
        body={"query": {"terms": {"asin.keyword": asinKeys}}, "size": 7000},
    )

    return Backend_Helper.refineResult(result)
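
These snippets all assume a module-level Elasticsearch client named es and a project-specific Backend_Helper class, neither of which is shown on this page. A minimal sketch of the client setup, assuming a local single-node cluster (the URL is an assumption, and Backend_Helper is only referenced as a placeholder):

from elasticsearch import Elasticsearch

# Connection URL is an assumption; the original project's configuration is not shown.
es = Elasticsearch("http://localhost:9200")

# Backend_Helper.refineResult / refineReviews are project-specific helpers that
# post-process the raw Elasticsearch response; they are not reproduced here.
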
Example #2
def search_for_all_docs():
    allDocs = es.search(index="amazon",
                        body={
                            "size": 10000,
                            "query": {
                                "match_all": {}
                            }
                        })
    return allDocs
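
A match_all with "size": 10000 returns at most 10,000 hits, the default index.max_result_window. If the index can grow past that limit, the scroll-based scan helper streams every hit instead; a minimal sketch, assuming the same es client and "amazon" index:

from elasticsearch import helpers

def iterate_all_docs():
    # helpers.scan streams every matching hit via the scroll API,
    # so it is not capped by index.max_result_window.
    return list(helpers.scan(es, index="amazon", query={"query": {"match_all": {}}}))
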
Example #3
def search_for_some_docs():
    some_docs = es.search(index="amazon",
                          body={
                              "query": {
                                  "match": {
                                      "avgRating": 5
                                  }
                              },
                              "size": 10
                          })
    return some_docs
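
The match query above looks for an exact numeric value of avgRating. When a threshold is wanted instead of an exact rating, a range query is the more natural choice; a sketch, assuming the same index and field:

def search_for_highly_rated_docs(min_rating=4):
    # Return up to 10 documents whose average rating is at least min_rating
    return es.search(
        index="amazon",
        body={
            "query": {"range": {"avgRating": {"gte": min_rating}}},
            "size": 10,
        },
    )
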
Example #4
def get_vague_and_binary_lists(clean_data1):
  # create binary clean data if weighting is equal to 5
  binary_clean_data = {}
  clean_data = {}
  # bool_search_default = False  # If no weighting == 5 for any value, do not calculate the boolean search below
  for field in clean_data1.keys():
    if clean_data1[field]['weight'] == 5:
      # bool_search_default = True
      binary_clean_data[field] = clean_data1[field]  # weight doesn't matter for boolean search
    else:
      clean_data[field] = clean_data1[field]
      # binary_clean_data also has to contain the empty/meaningless fields because this is the format expected by the BinarySearch() method
      # This doesn't matter though because weight has no meaning for boolean search and is not used in the calculation for the result set
      binary_clean_data[field] = {'weight': 1}
  # print("print binary_clean_data: ", binary_clean_data)
  # print("print clean_data: ", clean_data)
  # Compute boolean/binary search for items with weighting = 5
  bin_obj = binary_search.BinarySearch()
  query = bin_obj.createBinarySearchQuery(binary_clean_data)
  res = es.search(index="amazon", body=query)
  output_binary = Backend_Helper.refineResult(res)
  return clean_data, output_binary
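
createBinarySearchQuery is project code whose output format is not shown here. Purely as an illustration of what an exact-match ("binary") query can look like in the Elasticsearch query DSL, here is a sketch with a hypothetical input shape (the 'values' key is an assumption, not the project's real format):

def build_exact_match_query(binary_clean_data):
    # Illustrative only; the real query is built by binary_search.BinarySearch.createBinarySearchQuery
    must_clauses = []
    for field, spec in binary_clean_data.items():
        for value in spec.get("values", []):  # hypothetical key holding the searched values
            must_clauses.append({"match": {field: value}})
    return {"query": {"bool": {"must": must_clauses}}, "size": 7000}
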
Example #5
def do_query(data):

  # Load the pre-pickled list of all documents
  with open(allDocs_path, 'rb') as infile:
    allDocs = pickle.load(infile)

  data = Backend_Helper.clean_frontend_json(data)

  #create binary clean data if weighting is equal to 5
  binary_clean_data = {}
  clean_data = {}
  alexa_clean_data = {}
  output_binary = list()
  # bool_search_default = False  # If no weighting == 5 for any value, do not calculate the boolean search below

  for field in data.keys():
    if data[field]['weight'] == 5:
      #bool_search_default = True
      binary_clean_data[field] = data[field]  # weight doesn't matter for boolean search
    elif data[field]["weight"] == 6:  # This is for alexa_search, used at the end
      alexa_clean_data[field] = data[field]
    else:
      clean_data[field] = data[field]

  #Compute boolean/binary search for items with weighting = 5
  bin_obj = binary_search.BinarySearch()
  alexa_searcher = alexa_functions.AlexaSearch(es)

  if len(binary_clean_data) > 0:
      query = bin_obj.createBinarySearchQuery(binary_clean_data)
      res = es.search(index="amazon", body=query)
      output_binary = Backend_Helper.refineResult(res)

  if len(alexa_clean_data) > 0:
      # Add alexa search results to output_binary; same mechanism and logic for both.
      alexa_result = get_alexa_search_result(allDocs, alexa_clean_data, alexa_searcher)
      output_binary += alexa_result

  res_search = list()

  # if(len(output_binary)) > 0:
  #     allDocs = [item for item in allDocs if item['asin']  in output_binary]

  # field_value_dict has the form:
  # {'binary' : { 'brandName': ['acer', 'hp'], 'weight':1}, ...}, 'vague' : {....},
  field_value_dict = extract_fields_and_values(clean_data)
  # print(field_value_dict)


  # Get the total cumulative weight weight_sum (e.g. 7 if the weights of all attributes add up to 7)
  # and divide each score by weight_sum for normalization

  weight_sum = 0
  for field_type in field_value_dict.keys():
    for field_name in field_value_dict[field_type]:
      field_weight = field_value_dict[field_type][field_name]["weight"]
      if field_weight != 5: ##Shouldn't happen though because they have already been removed from clean_data
        weight_sum += field_weight

  # --------------------------------------------------------------------#
  # Objects for each class to use the vague searching functions
  range_searcher = vague_search_range.VagueSearchRange(es)

  binary_searcher = binary_search_text.BinarySearchText(es)

  harddrive_searcher = vague_search_harddrive.VagueHardDrive(es)

  value_searcher = vague_search_value.VagueSearchValue(es)

  ######################################################################## NEW #########################################
  # ColorInformation helper: its extractKeyValuePairs() method extracts the searched values.
  c_i_helper = ColorInformation()
  price_searcher = vague_search_price.VagueSearchPrice(es)
  ######################################################################## NEW #########################################

  # --------------------------------------------------------------------#
  # Special case to handle hardDriveSize; length is > 1 if it has values other than weight
  if 'hardDriveSize' in clean_data and len(clean_data["hardDriveSize"]) > 1:
    # res_search += vague_search_harddrive.computeVagueHardDrive_alternative(allDocs, clean_data,
    #                                                                                       harddrive_searcher,
    #                                                                                       res_search)
    res_search = harddrive_searcher.computeVagueHardDrive_alternative(allDocs, field_value_dict,
                                                                           harddrive_searcher,
                                                                           res_search)
  #  --------------------------------------------------------------------#
  # Special case to handle price
  if 'price' in clean_data and len(clean_data["price"]) > 1:
    ##NEW##########
    #res_search += vague_search_price.VagueSearchPrice.computeVaguePrice_alternative(allDocs, clean_data, price_searcher, res_search, searchedValues)
    res_search = price_searcher.computeVaguePrice_alternative(allDocs, field_value_dict, price_searcher, res_search)

  # --------------------------------------------------------------------#
  # Gets scores for all other attributes
  res_search += call_responsible_methods(allDocs, field_value_dict, range_searcher, binary_searcher, value_searcher,
                                         alexa_searcher)

  # --------------------------------------------------------------------#
  resList = [dict(x) for x in res_search]

  # Counter objects count the occurrences of objects in the list...
  count_dict = Counter()
  for tmp in resList:
    count_dict += Counter(tmp)

  result = dict(count_dict)
  sortedDict = collections.OrderedDict(sorted(result.items(), key=lambda x: x[1], reverse=True))
  asinKeys = list(result.keys())

  # call the search function
  outputProducts = getElementsByAsin(asinKeys)  # calls the helper class method refineResult

  # Compare outputProducts and output_binary to select only items that also occur in boolean search
  outputProducts, output_binary = filter_from_boolean(outputProducts, output_binary)

  # add a vagueness score to the returned objects and normalize
  for item in outputProducts:
    # Normalize the scores so that for each score x,  0< x <=1
    item['vaguenessScore'] = result[item['asin']]/weight_sum


  outputProducts = sorted(outputProducts, key=lambda x: x["vaguenessScore"], reverse=True)

  for item in output_binary: #binary search results that did not meet other vague requirements
    item['vaguenessScore'] = None

  # concatenate with products with weighting 5 ***
  outputProducts = outputProducts + output_binary
  # products with same vagueness score should be listed according to price descending

  #searchedValues = c_i.extractKeyValuePairs()
  #c_i.prozessDataBinary(searchedValues)

  # If possible, apply sorting before weighting, so it does not interfere with the list sorted by weighting
  s_p = SortByPrice()


  # Delete all products with vaguenessScore == 0
  outputProducts_vaguenessGreaterZero = list()

  for laptop in outputProducts:
    if laptop["vaguenessScore"] != 0:
      outputProducts_vaguenessGreaterZero.append(laptop)
  outputProducts_vaguenessGreaterZero = s_p.sort_by_price(outputProducts_vaguenessGreaterZero)

  #outputProducts_vaguenessGreaterZero , output_binary = filter_from_boolean(outputProducts_vaguenessGreaterZero, output_binary)

  #outputProducts_vaguenessGreaterZero = outputProducts_vaguenessGreaterZero[:1000]
  c_i_helper.add_matched_information(data, outputProducts_vaguenessGreaterZero, allDocs)

  # Needed in the frontend: return the results together with the original query
  outputProducts_vaguenessGreaterZero_with_original_query = [outputProducts_vaguenessGreaterZero, data]

  return outputProducts_vaguenessGreaterZero_with_original_query
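
The scoring step in do_query merges the per-attribute score dictionaries with collections.Counter and then normalizes by the cumulative weight. A standalone sketch of just that step, using made-up asin scores and weights:

from collections import Counter

# Hypothetical per-attribute score dictionaries keyed by product asin
scores_price = {"B001": 0.8, "B002": 0.3}
scores_brand = {"B001": 1.0, "B003": 0.5}

total = Counter()
for partial in (scores_price, scores_brand):
    total += Counter(partial)  # Counter addition sums the scores per asin

weight_sum = 2  # e.g. two attributes, each with weight 1
normalized = {asin: score / weight_sum for asin, score in total.items()}
# normalized == {'B001': 0.9, 'B002': 0.15, 'B003': 0.25}
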
Example #6
def get_reviews_data(asin_keys):
    result = es.search(
        index="products",
        body={"query": {"terms": {"asin.keyword": asin_keys}}, "size": 7000},
    )
    return Backend_Helper.refineReviews(result)
Example #7
def search():
    """
    Search for a file. The required parameter "q" is the term to search for.

    Possible filters:
     - type = Limit to files of this type only
     - path = Limit to files in this directory only

    The search automatically adds the following filters:
     - level = Limit to files whose level is lower than or equal to the current user's level
     - group = Limit to files that share at least one group with the current user

    Extra parameters:
     - page = The page number
    """
    query_text = request.args.get('q', '').lower()
    if not query_text:
        return abort(400)

    type_item = None
    type_id = request.args.get('type', None)
    path = request.args.get('path', '')
    page_size = 20
    page = int(request.args.get('page', 1)) - 1

    if type_id:
        type_item = mongo.db.types.find_one_or_404({'_id': ObjectId(type_id)})
        invalid_level = type_item['level'] > g.level
        if g.level < 3:
            type_groups = type_item['groups']
            user_groups = [group['_id'] for group in g.groups]
            invalid_group = len(type_groups) > 0 and set(
                user_groups).isdisjoint(type_groups)
        else:
            invalid_group = False
        if invalid_level or invalid_group:
            return abort(401)
        query_type = [type_id]
    else:
        query_type = [str(t['_id']) for t in get_types()]

    inherited_level = 1
    query = {
        'query': {
            'bool': {
                'must': [
                    {
                        'term': {
                            'doc_type': 'doc'
                        }
                    },  # Only documents
                    {
                        'range': {
                            'level': {
                                'lt': g.level + 1
                            }
                        }
                    },  # Only lower or equal than current user level
                    {
                        'terms': {
                            'type.keyword': query_type
                        }
                    },  # Only allowed types
                    {
                        'regexp': {
                            'path.keyword': '{}.*'.format(path)
                        }
                    },  # Only inside this path
                    {
                        'bool': {
                            'should': [
                                {
                                    'match': {
                                        'entities': {
                                            'query': query_text,
                                            'boost': 6
                                        }
                                    }
                                },  # Match term in entities with a boost of 6
                                {
                                    'match': {
                                        'name_lower': {
                                            'query': query_text,
                                            'boost': 3
                                        }
                                    }
                                },  # Match term in name with a boost of 3
                                {
                                    'match': {
                                        'description_lower': query_text
                                    }
                                }  # Match term in description with no boost
                            ],
                            'minimum_should_match': '1'
                        }
                    }  # Should match at least one of the above
                ]
            }
        },
        'size': page_size,
        'from': page_size * page
    }
    results = es.search(index=app.config['ELASTICSEARCH_INDEX'],
                        doc_type='documents',
                        body=query)

    return render_template('search.html',
                           type_item=type_item,
                           path=None,
                           query_text=query_text,
                           inherited_level=inherited_level,
                           results=results)
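
The caret boost syntax ('entities^6') applies to field names in multi_match or query_string queries, not to the text of a match query, which is why the should clauses above set boost explicitly. A more compact formulation using multi_match is sketched below; note that its default best_fields scoring takes the best single field match rather than summing the should clauses, so it is similar but not identical:

query_text = "example term"  # placeholder for the request parameter used in the view above

boosted_query = {
    'query': {
        'multi_match': {
            'query': query_text,
            'fields': ['entities^6', 'name_lower^3', 'description_lower']
        }
    }
}
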
Example #8
async def search_book(keyword: str):
    try:
        result = es.search(index="library", body={"query": {"match": {"title": f'"{keyword}"'}}})
        return result['hits']['hits']
    except elasticsearch.exceptions.NotFoundError:
        return {"error": "No matching book found!"}