Example 1
    def match_genres(self) -> Tuple[List[str], float]:
        db_genres = self.db_user.genres
        current_genres = self.current_user.genres

        # Keep only "high impact" genres: those with a weight above 3.
        high_impact_db = {k: v for k, v in db_genres.items() if v > 3}
        high_impact_current = {
            k: v for k, v in current_genres.items() if v > 3}

        genres_intersection = list(
            set(high_impact_db).intersection(high_impact_current))

        # `or 1` avoids a ZeroDivisionError when either user has no
        # high-impact genres.
        total_genres = min(len(high_impact_db), len(high_impact_current)) or 1

        logger.debug(len(genres_intersection))

        return (genres_intersection,
                len(genres_intersection) / total_genres * 100)
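A minimal usage sketch, assuming the method lives on a matcher class (here called `GenreMatcher`, a made-up name) whose constructor stores `db_user` and `current_user`; the user objects only need a `genres` dict mapping genre names to weights:

from types import SimpleNamespace

# Stand-in user objects; weights above 3 count as "high impact".
db_user = SimpleNamespace(genres={'rock': 5, 'jazz': 4, 'pop': 1})
current_user = SimpleNamespace(genres={'rock': 4, 'pop': 5})

matcher = GenreMatcher(db_user=db_user, current_user=current_user)
shared, score = matcher.match_genres()
print(shared, score)  # ['rock'] 50.0 - one shared genre out of min(2, 2)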
Example 2
    async def update_one(
        self,
        *,
        table: str,
        criteria: dict,
        **new_values,
    ) -> Optional[dict]:
        try:
            update = {"$set": new_values}

            logger.debug(f"Update one: {criteria} to {update} -> {table}")

            result = await self._db[table].find_one_and_update(
                filter=criteria,
                update=update,
                return_document=ReturnDocument.AFTER,
            )

            logger.debug(f"Update one result: {result}")

            return result

        except pymongo.errors.DuplicateKeyError as exp:
            duplicate_value = exp.details["keyValue"]
            raise exceptions.BaseException_(
                status_code=exceptions.DuplicateValue.status_code,
                detail=f"Duplicate Value: {duplicate_value}",
            ) from exp
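A usage sketch for this and the other repository methods below, assuming a Motor-backed repository class (here called `MongoRepo`, a made-up name) whose `self._db` is an `AsyncIOMotorDatabase`:

import asyncio

from motor.motor_asyncio import AsyncIOMotorClient


async def main():
    client = AsyncIOMotorClient("mongodb://localhost:27017")
    repo = MongoRepo(db=client["app"])  # hypothetical constructor

    updated = await repo.update_one(
        table="users",
        criteria={"email": "a@example.com"},
        display_name="Alice",
    )
    print(updated)  # the document after $set, or None when nothing matched


asyncio.run(main())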
Example 3
    def _split_long_sentences(self, sentences):
        """Split sentences that exceed the tokenizer's maximum length.

        Args:
            sentences (list): list of flair Sentence objects

        Returns:
            list: the input sentences, with over-long ones replaced by
                shorter pieces
        """
        extended = sentences.copy()
        tokenizer = self.model.embeddings.tokenizer
        offset = 0
        for i, sentence in enumerate(sentences):
            len_bpe = len(tokenizer.tokenize(sentence.to_tokenized_string()))
            if len_bpe > self.max_length:
                extended.pop(i + offset)
                num_pieces = len_bpe // self.max_length + 1
                for piece in array_split(sentence, num_pieces):
                    char_offset = piece[0].start_pos
                    sentence_piece = Sentence()
                    for token in piece:
                        token.start_pos -= char_offset
                        token.end_pos -= char_offset
                        sentence_piece.add_token(token)
                    piece[-1].whitespace_after = False
                    extended.insert(i + offset, sentence_piece)
                    offset += 1
                # The original sentence was popped above, so decrease the offset by one.
                offset -= 1
        logger.debug(f'Lengths before split: {[len(x) for x in sentences]}')
        logger.debug(f'Lengths after split: {[len(x) for x in extended]}')
        return extended
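`array_split` here is presumably `numpy.array_split`, which splits a sequence into the requested number of roughly equal pieces even when the length does not divide evenly; a quick illustration:

from numpy import array_split

tokens = list(range(10))
# Three pieces of sizes 4, 3 and 3 - nothing is dropped.
print([list(piece) for piece in array_split(tokens, 3)])
# [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]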
Example 4
 def wrap(*args, **kwargs):
     logger.debug('START: {}'.format(name))
     t = Timer()
     rv = func(*args, **kwargs)
     duration = t.stop()
     logger.debug('Done: {} sec'.format(duration))
     return rv
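For context, a minimal sketch of the enclosing timing decorator this wrapper appears to come from; the decorator name `timed`, the `Timer` helper, and the source of `name` are assumptions:

import functools
import logging
import time

logger = logging.getLogger(__name__)


class Timer:
    """Stand-in timer: starts on construction, stop() returns elapsed seconds."""

    def __init__(self):
        self._start = time.perf_counter()

    def stop(self):
        return time.perf_counter() - self._start


def timed(func):
    name = func.__name__

    @functools.wraps(func)
    def wrap(*args, **kwargs):
        logger.debug('START: {}'.format(name))
        t = Timer()
        rv = func(*args, **kwargs)
        duration = t.stop()
        logger.debug('Done: {} sec'.format(duration))
        return rv

    return wrap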
Example 5
def static_from_root():
    logger.debug('ROBOTS FLASK_CONFIG PRODUCTION: %s', app.config['PRODUCTION'])
    # Serve the real robots.txt only in production; block crawlers elsewhere.
    if app.config['PRODUCTION']:
        path = os.path.join('static', 'robots.txt')
    else:
        path = os.path.join('static', 'block_robots.txt')
    return redirect(path, code=302)
Example 6
    async def delete_one(self, *, table: str, **criteria) -> bool:
        logger.debug(f"Delete One: {criteria} -> {table}")

        result = await self._db[table].delete_one(filter=criteria)

        logger.debug(
            f"Delete one Result: {result.acknowledged} deleted: {result.deleted_count}"
        )

        return result.deleted_count == 1
Example 7
    async def read_one(
        self,
        table: str,
        **criteria,
    ) -> Optional[dict]:
        logger.debug(f"Read One: {criteria} -> {table}")

        result = await self._db[table].find_one(filter=criteria)

        logger.debug(f"Read one result: {result}")

        return result
Example 8
    async def insert_one(self, document: dict, *, table: str) -> str:
        logger.debug(f"Insert One: {document} -> {table}")

        try:
            result = await self._db[table].insert_one(document)
            return str(result.inserted_id)

        except pymongo.errors.DuplicateKeyError as exp:
            duplicate_value = exp.details["keyValue"]
            raise exceptions.BaseException_(
                status_code=exceptions.DuplicateValue.status_code,
                detail=f"Duplicate Value: {duplicate_value}",
            ) from exp
Example 9
 def response(self, request):
     X = request.get_json()['text']
     start = time()
     result, score = self.predict_paragraph(X)
     end = time()
     result = result.replace('_', ' ')
     logger.debug(f'time - {end - start}s')
     logger.debug(result)
     return json.dumps({
         'label': result,
         'confidence': score,
         # The first whitespace-separated token of the label is its heading number.
         'heading_num': result.split(' ')[0]
     }), 200
Example 10
def search_api(year):
    MAX_RESULTS = 300
    raw_query = request.args.get('query')

    if not raw_query or raw_query == '0':
        return jsonify({
            'error': 'Invalid Query Param',
            'results': [],
            'query': raw_query,
            'total_results': 0
        })

    final_query = process_query(raw_query)
    final_query_pat = re.compile(final_query, re.IGNORECASE)
    logger.debug('Raw Search Query: ' + str(raw_query))
    logger.debug('Final Search Query: ' + str(final_query_pat))

    results = search_db(pattern=final_query_pat, field='title')

    if not results:
        return jsonify({
            'error': 'No Results',
            'results': [],
            'target_year': year,
            'query': raw_query
        })

    sorted_results = sorted(results, key=lambda k: k['title'])
    prioritized_results = prioritize_match(results=sorted_results,
                                           raw_query=raw_query,
                                           field='title')
    total_results = len(prioritized_results)
    truncated = False
    if total_results > MAX_RESULTS:
        prioritized_results = prioritized_results[:MAX_RESULTS]
        truncated = True

    return jsonify({
        'results': prioritized_results,
        'target_year': year,
        'query': raw_query,
        'total_results': total_results,
        'max_results': MAX_RESULTS,
        'truncated': truncated
    })
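A hedged usage sketch via Flask's test client; the URL rule `/search/<year>` is an assumption, since the route decorator is not shown above:

with app.test_client() as client:
    resp = client.get('/search/2021?query=dune')
    data = resp.get_json()
    print(data['total_results'], data['truncated'])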
Example 11
async def get_current_user(token: str = Depends(oauth2_scheme),
                           db: Session = Depends(get_db),
                           spotify_oauth: SpotifyOAuth = Depends(
                               get_spotipy_oauth),
                           response: Response = Depends(get_responce)) -> User:
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )

    try:
        payload = jwt.decode(token,
                             os.getenv('ACCESS_KEY'),
                             algorithms=[ALGORITHM])
        payload_id: str = payload.get("spotify_id")

        if payload_id is None:
            raise credentials_exception
    except JWTError:
        raise credentials_exception
    user = get_user(db, payload_id)
    if user is None:
        raise credentials_exception
    payload_expires: str = payload.get("spotify_expires")
    is_token_expired = float(payload_expires) - datetime.now().timestamp() < 0

    if is_token_expired:
        auth_creds = refresh_spotify(user.refresh_token, spotify_oauth)
        logger.debug("AUTH", auth_creds)
        user_creds = UserBase(access_token=auth_creds["access_token"],
                              refresh_token=auth_creds["refresh_token"],
                              token_expires=auth_creds["expires_at"])
        user = update_user(db, user.spotify_id, cast(UserUpdate, user_creds))
        access_token = create_access_token(
            data=TokenData(spotify_id=user.spotify_id,
                           spotify_expires=user.token_expires - 600).dict(),
            expires_delta=ACCESS_TOKEN_EXPIRE_MINUTES)

        response.headers['authorization'] = f"Bearer {access_token}"
        response.headers['Access-Control-Expose-Headers'] = 'authorization'

    return user
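A minimal sketch of what the `create_access_token` helper used above might look like; it assumes python-jose (consistent with the `jwt.decode` call), HS256, the same `ACCESS_KEY` environment variable, and that `expires_delta` is a number of minutes:

import os
from datetime import datetime, timedelta, timezone

from jose import jwt

ALGORITHM = "HS256"


def create_access_token(data: dict, expires_delta: int) -> str:
    # python-jose converts a datetime 'exp' claim to a Unix timestamp.
    claims = dict(data)
    claims["exp"] = datetime.now(timezone.utc) + timedelta(minutes=expires_delta)
    return jwt.encode(claims, os.getenv("ACCESS_KEY"), algorithm=ALGORITHM)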
Example 12
    def handle_request(request):
        """
        Entrypoint of the Alexa requests. Route POST request by request type to appropriate package to handle.
        :type request: AlexaRequest
        :param request: incoming Alexa request
        :return: Generated JSON response answer
        """
        logger.debug(f"request intent={request.intent_name()} received: {request.request}")

        if request.request_type() == "LaunchRequest":
            return handle_launch(request)
        elif request.request_type() == "IntentRequest":
            return handle_intent(request)
        elif request.request_type() == "Dialog.Delegate":
            return handle_intent(request)
        elif request.request_type() == "SessionEndedRequest":
            return handle_end(request)
        else:
            raise UnknownRequestError(request.request_type())
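The if/elif chain can equally be written as a dispatch table; a sketch of that variant, reusing the handler names from above:

HANDLERS = {
    "LaunchRequest": handle_launch,
    "IntentRequest": handle_intent,
    "Dialog.Delegate": handle_intent,
    "SessionEndedRequest": handle_end,
}


def handle_request(request):
    handler = HANDLERS.get(request.request_type())
    if handler is None:
        raise UnknownRequestError(request.request_type())
    return handler(request)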
Example 13
def prioritize_match(results=None, raw_query=None, field=None):
    """If the search query matches a title exactly (no fuzziness or
    space forgiveness), push that result to the top of the list.
    """
    try:
        exact_query = '^{query}$'.format(query=raw_query)
        pattern = re.compile(exact_query, re.IGNORECASE)
    except re.error as errmsg:
        logger.error(
            'prioritize: Could not compile raw_query {}: {}'.format(
                exact_query, errmsg))
        return results
    prioritized_results = []
    for member in results:
        if re.match(pattern, member.get(field)):
            prioritized_results.insert(0, member)
            logger.debug('Priority Match Found: {}'.format(pattern))
        else:
            prioritized_results.append(member)
    return prioritized_results
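Note that `raw_query` reaches `re.compile` unescaped, which is why the compile step is guarded; a variant that treats the query as a literal string would build the pattern from `re.escape(raw_query)` instead. A quick usage sketch with made-up titles:

results = [{'title': 'Dune Part Two'}, {'title': 'Dune'}]
print(prioritize_match(results=results, raw_query='dune', field='title'))
# [{'title': 'Dune'}, {'title': 'Dune Part Two'}] - the exact match moves to the front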
Example 14
    async def read_many(
        self,
        table: str,
        offset: Optional[int] = None,
        limit: Optional[int] = None,
        **criteria,
    ) -> list[dict]:
        logger.debug(f"Read many: {criteria} -> {table}")

        params = {}
        if offset is not None:
            params["skip"] = offset

        if limit is not None:
            params["limit"] = limit

        result = [
            doc async for doc in self._db[table].find(criteria, **params)
        ]

        logger.debug(f"Read many result: {result}")

        return result
Example 15
 def response(self, request):
     X = request.get_json()['X']
     start = time()
     result = self.predict_paragraph(X)
     end = time()
     logger.debug("Prediction was made.")
     logger.debug(f'time - {end - start}s')
     logger.debug(result)
     return json.dumps({'y': result, 'time': end - start}), 200
Example 16
compress = Compress()
flasks3 = FlaskS3()
cache = Cache()
toolbar = DebugToolbarExtension()

flask_config = os.environ['FLASK_CONFIG']
logger.info('** FLASK_CONFIG: {}'.format(flask_config))
app.config.from_object('app.config.{}'.format(flask_config))

compress.init_app(app)
flasks3.init_app(app)
cache.init_app(app)
toolbar.init_app(app)

if bool(int(os.getenv('CACHE_CLEAR', 0))):
    cache.clear()
    logger.info('** Cache cleared [CACHE_CLEAR] True')

# ASSETS
assets = Environment(app)
assets.register('css_assets', css_assets)
assets.register('js_assets', js_assets)
assets.register('js_api_assets', js_api_assets)
logger.debug('ASSETS DEBUG: {}'.format(assets.debug))

logger.debug('FLASK S3 ACTIVE: {}'.format(app.config['FLASKS3_ACTIVE']))
logger.debug('FLASK S3 ASSETS ACTIVE: {}'.format(app.config['FLASK_ASSETS_USE_S3']))

from app import views, seo_response, errors
from app import assets
Example 17
# Init DB session

import uuid
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine import (columns, connection)
from app.utils.logger import logger
from app.config import settings

logger.info('Connecting to database...')
logger.debug('Contact points:')
logger.debug(settings.database.hosts)
"""USER MODELS"""


class User_by_id_Model(Model):
    __table_name__ = "user_by_id"

    user_id = columns.UUID(primary_key=True, default=uuid.uuid4, required=True)
    username = columns.Text(required=True)
    srp_salt = columns.Text(required=True)
    srp_verifier = columns.Text(required=True)
    kek_salt = columns.Text(required=True)
    cek = columns.Text(required=True)


class User_by_username_Model(Model):
    __table_name__ = "user_by_username"

    username = columns.Text(primary_key=True, required=True)
    user_id = columns.UUID(required=True)
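The module imports `connection` and `sync_table`, but the excerpt ends before they are used; a sketch of the typical cqlengine setup that would follow, with the keyspace name assumed:

connection.setup(settings.database.hosts, 'app_keyspace', protocol_version=3)

# Create or update the tables to match the model definitions above.
sync_table(User_by_id_Model)
sync_table(User_by_username_Model)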
Example 18
 async def count(self, *, table: str) -> int:
     logger.debug(f"Count -> {table}")
     return await self._db[table].estimated_document_count()
Example 19
from fastapi import FastAPI
from app.utils.logger import logger
from app.routers import auth

api = FastAPI()

logger.debug('Initializing routers...')
api.include_router(auth.router)
Example 20
 def __init__(self, model=None, lang=True, max_length=None):
     self.model = TextClassifier.load(model)
     self.tokenizer = tokenizers[lang]
     # Store the limit; _split_long_sentences reads self.max_length.
     self.max_length = max_length
     logger.debug(f"Classifier loaded: {model}")