def post(self):
    """Return the average price for a given make/model combination.

    Expects a JSON body with 'make' and 'model' keys. Responds with the
    average price grouped by make, or a not-found message when no rows
    match. On any error the session is rolled back and the request is
    aborted with HTTP 400; the session is always closed.
    """
    try:
        data = request.get_json()

        # Fail fast with a clear message instead of letting a raw
        # KeyError bubble into the generic 400 handler below.
        if not data or 'make' not in data or 'model' not in data:
            return {'message': "Request body must contain 'make' and 'model'"}, 400

        make = data['make']
        model = data['model']

        # considering combinations of make and model for returning the average price
        ans = Car_db.query.with_entities(
            Car_db.make,
            func.avg(Car_db.price).label('average_price')).filter(
                and_(Car_db.make == make,
                     Car_db.model == model)).group_by(Car_db.make).all()

        if not ans:  # empty result set -> no such make/model pair
            logging.info(
                'Record not found. Please enter valid combination of make and model'
            )
            return {
                'message':
                'Record not found. Please enter valid combination of make and model'
            }
        logging.info('Average price: %r', ans)
        return ans

    except Exception as e:
        # Undo any partially-applied work before reporting the error.
        db.session.rollback()
        abort(400, e.args[0])
    finally:
        db.session.close()
# Example #2
 def get(self):
     """Retrain the content-based and collaborative recommendation engines.

     Returns an empty success response on completion, the serialized
     response of a raised StatusCodeException, or an HTTP 500 response
     (with the traceback logged) for any unexpected error.
     """
     try:
         ContentWorker().train()
         CollaborativeWorker().train()
         return make_response()
     except StatusCodeException as ex:
         # Domain errors already know how to serialize themselves.
         return ex.to_response()
     except Exception as ex:
         # BUG FIX: traceback.print_exc() prints and returns None, so the
         # previous info(print_exc()) logged nothing; format_exc() returns
         # the traceback text for the logger.
         info(traceback.format_exc())
         # BUG FIX: Exception has no .message attribute in Python 3;
         # str(ex) yields the error text without raising AttributeError.
         return StatusCodeException(str(ex), 500).to_response()
    def __init__(self):
        """Load all items, users and ratings into memory for training.

        Pulls the full contents of each backing service into pandas
        DataFrames and builds a surprise-style Dataset from the ratings,
        logging the total ingestion time.
        """
        start = time.time()
        self.item_service = ItemService()
        self.user_service = UserService()
        self.rating_service = RatingService()
        self.recommender_service = Recommender_service()

        # Materialize every record up front; assumes the collections are
        # small enough to fit in memory -- TODO confirm for production data.
        self.items = pd.DataFrame(list(self.item_service.get_all()))
        self.users = pd.DataFrame(list(self.user_service.get_all()))
        self.ratings = pd.DataFrame(list(self.rating_service.get_all()))
        # NOTE(review): surprise's Dataset.load_builtin expects a built-in
        # dataset *name* (e.g. 'ml-100k'), not a DataFrame; load_from_df is
        # the usual API for custom data. Verify this call against the
        # installed surprise version.
        self.ds = Dataset.load_builtin(self.ratings)
        info("Training data ingested in %s seconds." % (time.time() - start))
 def __init__(self):
     """Build the TF-IDF model over item attributes.

     Loads the recommendation data, fits a word-level TF-IDF vectorizer
     (unigrams through trigrams, English stop words removed) over the
     concatenated item attributes, and precomputes the pairwise cosine
     similarities between all items.
     """
     started_at = time.time()

     self.item_service = ItemService()
     self.data = pd.DataFrame(list(self.item_service.get_rec_data()))

     vectorizer = TfidfVectorizer(analyzer='word',
                                  ngram_range=(1, 3),
                                  min_df=0,
                                  smooth_idf=False,
                                  stop_words='english')
     matrix = vectorizer.fit_transform(self.data['concated_attrs'])

     self.tfidf = vectorizer
     self.tfidf_matrix = matrix
     # linear_kernel over L2-normalized TF-IDF rows is cosine similarity.
     self.cosine_similarities = linear_kernel(matrix, matrix)

     info("Training data ingested in %s seconds." % (time.time() - started_at))
    def train(self):
        """Train the engine over every item in the dataset.

        Delegates to ``_train_item`` for each row of ``self.data``: a
        TF-IDF matrix of unigrams, bigrams, and trigrams is built per
        product, similarity is computed between all products with SciKit
        cosine similarity, and each item's 50 most-similar items (with
        scores) are stored in the database as one sorted set per item.

        Returns:
            None
        """
        started_at = time.time()
        for row_index, row in self.data.iterrows():
            self._train_item(row, row_index)
        elapsed = time.time() - started_at
        info("Engine trained in %s seconds." % elapsed)
    def get(self, id):
        """Fetch a single car row by primary key and return it serialized.

        Args:
            id: The Car_db primary key to look up.

        Returns:
            A (serialized rows, 200) tuple on success; rolls back and
            aborts with HTTP 400 on any error. The session is always closed.
        """
        try:
            # I am using firebase-admin sdk for reading the Bearer access token from header.
            # NOTE(review): the token is extracted but never verified or used
            # below -- confirm whether authentication is actually enforced
            # elsewhere (e.g. middleware) before relying on this endpoint.
            token = request.headers.get('Authorization').replace(
                "Bearer ", "").strip()

            car = Car_db.query.with_entities(
                Car_db.id, Car_db.make, Car_db.model, Car_db.year,
                Car_db.last_updated,
                Car_db.price).filter(Car_db.id == id).all()

            # NOTE(review): accessing .data on the dump result implies
            # marshmallow < 3; marshmallow 3 returns the payload directly.
            # Verify the pinned marshmallow version.
            car = carschema.dump(car, many=True).data

            logging.info('Response json: %r', car)

            return car, 200
        except Exception as e:
            # Any failure above (including a missing Authorization header,
            # which makes .get() return None and .replace() raise) lands here.
            db.session.rollback()
            abort(400, e.args[0])
        finally:
            db.session.close()
    def train_item(self, item_id):
        """Retrain similarity data for a single item.

        Resolves the item and its positional index, then rebuilds that
        item's entries: a TF-IDF matrix of unigrams, bigrams, and trigrams
        is created for the item, similarity against all other products is
        computed with SciKit cosine similarity, and the 50 most-similar
        items with their scores are stored in the database as the item's
        sorted set.

        Args:
            item_id (objectId): The item id to retrain.

        Returns:
            None
        """
        started_at = time.time()
        target, position = self._get_item_index(item_id)
        self._train_item(target, position)
        elapsed = time.time() - started_at
        info("Item %s trained in %s seconds." % (item_id, elapsed))
    def post(self):
        """Bulk-load car data from an uploaded CSV into the cars table.

        Expects a multipart file upload under the 'car_data' key. The
        target table is REPLACED (dropped and recreated) with the CSV
        contents on every upload.

        Returns:
            ('File read successfully', 201) on success; rolls back and
            aborts with HTTP 400 on any error. The session is always closed.
        """
        try:
            csv_file = request.files['car_data']

            df = pd.read_csv(csv_file)
            logging.info('The data is : %r', df)

            engine = db.get_engine()

            # if_exists accepts 'fail', 'append' or 'replace'; 'replace'
            # drops and recreates the table -- switch to 'append' to keep
            # existing rows.
            # FIX: removed index_label='chassis_no' -- pandas ignores
            # index_label when index=False, so it was a misleading no-op.
            df.to_sql(
                name=Car_db.__tablename__,
                con=engine,
                if_exists='replace',
                index=False,
            )

            return 'File read successfully', 201

        except Exception as e:
            # Undo any partially-applied work before reporting the error.
            db.session.rollback()
            abort(400, e.args[0])
        finally:
            db.session.close()
 def train(self):
     """Train the collaborative engine.

     Runs the Pearson-similarity computation over the loaded ratings and
     logs the elapsed time.
     """
     started_at = time.time()
     # NOTE(review): the literal arguments 1 and 2 look like sample ids --
     # confirm what calculatePearsonSimilarity expects here.
     self.calculatePearsonSimilarity(self.ratings, 1, 2)
     #evaluate(algo, ds, measures=['RMSE', 'MAE'])
     info("Engine trained in %s seconds." % (time.time() - started_at))