Example #1
settings_var = os.environ.get("STORAGE_SETTINGS")
if settings_var:
    app.config.from_pyfile(settings_var, silent=True)

config_get = app.config.get

cache = None
if config_get("REDIS_CACHE"):
    cache = RedisCache(host=config_get("REDIS_HOST"),
                       port=config_get("REDIS_PORT"),
                       password=config_get("REDIS_PASSWORD"),
                       db=config_get("REDIS_DB"),
                       default_timeout=config_get("REDIS_TIMEOUT"),
                       key_prefix=config_get("REDIS_PREFIX"))
else:
    cache = SimpleCache(threshold=50)

# The AWS session to connect to services.
aws_session = None
if all([config_get("AWS_ACCESS_KEY_ID"), config_get("AWS_SECRET_ACCESS_KEY")]):
    aws_session = boto3.session.Session(
        aws_access_key_id=config_get("AWS_ACCESS_KEY_ID"),
        aws_secret_access_key=config_get("AWS_SECRET_ACCESS_KEY"))
else:
    print("No AWS credentials specified")
    sys.exit(1)

s3_client = aws_session.client("s3")

# List of available sizes for bytes formatting.
SIZES = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
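# Hypothetical helper, not part of the original snippet: a minimal sketch of how the
# SIZES list above would typically be used to render a byte count for display.
def format_bytes(num_bytes):
    size = float(num_bytes)
    for unit in SIZES:
        if size < 1024 or unit == SIZES[-1]:
            return '%.1f %s' % (size, unit)
        size /= 1024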
# The following __init__ belongs to a SimpleCache-backed session-interface class that was
# merged into this example during extraction; the class name used here is illustrative only.
class CachedSessionInterface(MySessionInterface):
    def __init__(self):
        MySessionInterface.__init__(self,
                                    SimpleCache(),
                                    prefix='my_cache_session')
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger().setLevel(logging.INFO)

ADMINISTRATORS = ('*****@*****.**', )

app = Flask(__name__)

app.config['SESSION_TYPE'] = 'memcached'
app.config['SECRET_KEY'] = '\xcf\x10\xb4Q#\xf3\xcc5D\xf6\xb63\xadFk\x02"\x8b|D\xf3\x08\xc3['
#sess = Session()

mail_on_500(app, ADMINISTRATORS)
Compress(app)
app.config['COMPRESS_DEBUG'] = True
cache = SimpleCache(default_timeout=60 * 60 * 24)

EXAC_FILES_DIRECTORY = '../exac_data/'
REGION_LIMIT = 1E5
EXON_PADDING = 50


@app.route('/exac')
def homepageX():
    tmpdbname = (request.path).replace('/', '')
    #print "exac db-->>",request.path,tmpdbname
    session['db_selected'] = tmpdbname
    exac_var = tmpdbname.lower()
    cache_key = 't-homepage'
    t = cache.get(cache_key)
    if t is None:
Example #4
def create_cache():
    return SimpleCache()
Example #5
 def __init__(self):
     CacheSessionInterface.__init__(self,
                                    cache=SimpleCache(),
                                    prefix='simple_cache_session:')
Example #6
downvote_course = db.Table('downvote_course',
    db.Column('user_id', db.Integer, db.ForeignKey('users.id')),
    db.Column('course_id', db.Integer, db.ForeignKey('courses.id'))
    )

review_course = db.Table('review_course',
    db.Column('user_id', db.Integer, db.ForeignKey('users.id')),
    db.Column('course_id', db.Integer, db.ForeignKey('courses.id'))
    )

follow_user = db.Table('follow_user',
    db.Column('follower_id', db.Integer, db.ForeignKey('users.id')),
    db.Column('followed_id', db.Integer, db.ForeignKey('users.id'))
    )

latest_notifications_cache = SimpleCache()

class User(db.Model, UserMixin):
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255), unique=True)  # username
    email = db.Column(db.String(255), unique=True)
    password = db.Column(db.String(255), nullable=False)
    active = db.Column(db.Boolean(), default=True)  # whether the account has been activated
    role = db.Column(db.String(20), default='User')  # regular user or administrator
    gender = db.Column(db.Enum('male', 'female', 'unknown'), default='unknown')
    identity = db.Column(db.Enum('Teacher', 'Student'))  # student or teacher

    register_time = db.Column(db.DateTime(), default=datetime.utcnow)
    confirmed_at = db.Column(db.DateTime())
Example #7
from rdkit import Chem
import rdkit.Chem.inchi
from rdkit.Chem.Descriptors import MolWt
from urllib.error import HTTPError
import cirpy
import re
from werkzeug.contrib.cache import SimpleCache
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D

cas_to_inchi_cache = SimpleCache()
cas_to_inchikey_cache = SimpleCache()
cas_to_smiles_cache = SimpleCache()
cas_to_names_cache = SimpleCache()
inchi_to_cas_cache = SimpleCache()
inchi_to_names_cache = SimpleCache()
inchikey_to_cas_cache = SimpleCache()
inchikey_to_smiles_cache = SimpleCache()
inchikey_to_inchi_cache = SimpleCache()
inchikey_to_names_cache = SimpleCache()
smiles_to_cas_cache = SimpleCache()
smiles_to_names_cache = SimpleCache()
name_to_inchi_cache = SimpleCache()
name_to_inchikey_cache = SimpleCache()
name_to_smiles_cache = SimpleCache()
name_to_cas_cache = SimpleCache()

caches = {}
caches['cas'] = {}
caches['cas']['smiles'] = cas_to_smiles_cache
caches['cas']['stdinchi'] = cas_to_inchi_cache
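A small read-through sketch of how one of these caches would typically sit in front of a remote CIR lookup via cirpy; the helper name and the lack of error handling are illustrative assumptions, not part of the original module:

def cas_to_smiles(cas_number):
    smiles = cas_to_smiles_cache.get(cas_number)
    if smiles is None:
        smiles = cirpy.resolve(cas_number, 'smiles')  # remote lookup, hence the cache
        cas_to_smiles_cache.set(cas_number, smiles)
    return smiles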
Example #8
import pickle
import math
import json
from config import CONFIG
from exception.exceptions import NoDataFoundException, GeneralException

from flask import Flask, request
from werkzeug.contrib.cache import SimpleCache
import numpy
import pandas
import sklearn.neighbors as nb
import requests


app = Flask(__name__)
app_cache = SimpleCache()
DATAFRAME = None



"""
Creates the dataframe and stores it in the cache. If the dataframe has been created before, fetches object from cache.
"""
def _get_csv_dataframe():
	df = app_cache.get("csv_file")
	if df is None:
		df = pandas.read_csv(CSV_FILE_LOCATION)
		app_cache.set("csv_file", df)
	return df

Example #9
from resources.dining import Dining, DiningInformation, DiningSearch, DiningToday
#from resources.weather import Weather
from resources.wifi import Wifi, WifiNearMe
from resources.laundry import Laundry
from resources.main import Main
from resources.free_food import FreeFood
from resources.ews_status import EWSStatus
from resources.athletic_schedule import AthleticSchedule
from resources.buildings import Buildings
from resources.directory import FacultyDirectory
from resources.daily_illini import News, SubCategoryNews, SportsNews, RecentNews
from resources.calendar import Calendar

app = Flask(__name__)
api = Api(app)
cache = SimpleCache()  # SimpleCache takes no app argument (only threshold/default_timeout)

# Define routes
api.add_resource(Main, '/')

'''Dining'''
api.add_resource(DiningToday, '/dining/<string:hall>')
api.add_resource(Dining, '/dining/<string:hall>/<string:dateFrom>/<string:dateTo>')
api.add_resource(DiningSearch, '/dining/search/<string:query>')
api.add_resource(DiningInformation, '/dining/information')

'''Wifi'''
api.add_resource(Wifi, '/wifi')
#api.add_resource(WifiNearMe, '/wifi/<string:latitude>/<string:longitude>')

#api.add_resource(Weather, '/weather')
Example #10
__project_dir__ = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if __project_dir__ not in sys.path:
    sys.path.append(__project_dir__)
import jwt
import datetime
from werkzeug.contrib.cache import SimpleCache
from uuid import uuid4
import mongo_db
from log_module import get_logger
from mail_module import send_mail
from jwt.exceptions import InvalidSignatureError
"""身份验证模块,主要是和jwt相关的部分"""

logger = get_logger()
ObjectId = mongo_db.ObjectId
s_cache = SimpleCache()  # In-memory cache; its contents are lost when the system shuts down.


class GlobalSignature(mongo_db.BaseDoc):
    """
    全局数字签名,一般用于JWT的secret,
    注意,考虑性能,此集合的大小是固定的.目前只存储100个记录
    """
    _table_name = "global_signature"
    type_dict = dict()
    type_dict['_id'] = ObjectId
    type_dict['signature'] = str  # 数字签名
    type_dict['algorithm'] = str  # 算法
    type_dict['expire'] = int  # 过期时间.单位秒
    type_dict['time'] = datetime.datetime  # 创建时间.
Example #11
elif "WEBFAF_ENVIRON_TEST" in os.environ:
    app.config.from_object("webfaf2.config.TestingConfig")
else:
    app.config.from_object("webfaf2.config.DevelopmentConfig")

db = SQLAlchemy(app)

if app.config["CACHE_TYPE"].lower() == "memcached":
    from werkzeug.contrib.cache import MemcachedCache
    flask_cache = MemcachedCache(['{0}:{1}'.format(
        app.config["MEMCACHED_HOST"],
        app.config["MEMCACHED_PORT"])],
        key_prefix=app.config["MEMCACHED_KEY_PREFIX"])
elif app.config["CACHE_TYPE"].lower() == "simple":
    from werkzeug.contrib.cache import SimpleCache
    flask_cache = SimpleCache()
else:
    from werkzeug.contrib.cache import NullCache
    flask_cache = NullCache()

if app.config["PROXY_SETUP"]:
    app.wsgi_app = ProxyFix(app.wsgi_app)

if app.config["OPENID_ENABLED"]:
    from flask.ext.openid import OpenID
    from openid_teams import teams
    oid = OpenID(app, safe_roots=[], extension_responses=[teams.TeamsResponse])
    from login import login
    app.register_blueprint(login)

from dumpdirs import dumpdirs
Example #12
from functools import partial
import re

from flask import render_template, session, request, redirect, url_for
from werkzeug.contrib.cache import SimpleCache

from . import app
from . import github
from .const import PICS_REPO_NAME
from .github import HTTPError
from .utils import cached as cached_decorator


## Caching-related stuff
cache = SimpleCache(default_timeout=120)
cached = partial(cached_decorator, cache)

## Regexp for image files.
## We pre-compile it and keep it here so that it can be unit-tested,
## not for performance reasons.
img_file_re = re.compile(r'^[0-9a-f]{10,40}\.(jpg|gif|png)$')
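# Illustrative self-check, not part of the original module: the pre-compiled pattern
# accepts hex-named image files (e.g. truncated blob hashes) with a known extension.
assert img_file_re.match('0123456789abcdef0123.png') is not None
assert img_file_re.match('not-an-image.txt') is None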


@app.context_processor
def add_user_info():
    user = cache.get('user_profile')
    if user is None:
        try:
            auth = github.get_session(token=session['token'])
            resp = auth.get('user')
Example #13
class APIBase(esadapter.InitConnection):

    __cache = SimpleCache(threshold=config.CACHE_SIZE,
                          default_timeout=config.CACHE_TIMEOUT)

    def __init__(self):
        esadapter.InitConnection.__init__(self)
        Utils.setup_console_logging()

    def is_instance(self, prepid, index, doc_type):
        """
        Checks if prepid matches any typeof in the index
        """
        try:
            self.es.get_source(index=index, doc_type=doc_type, id=prepid)
        except elasticsearch.NotFoundError:
            return False

        return True

    def parse_query(self, query):
        """
        Returns query and index name
        First it checks if there are wildcards in query
        If there are wildcards, it checks for matching requests and rereco requests
        If there are no wildcards, check for exact matches in following order
        Order:
          campaign
          request
          chained campaign
          chained request
          ppd tag
          tag
          flow
          mcm dataset name
          mcm datatier
          rereco request
          rereco processing string
          rereco campaign
          relval request
          relval cmssw version
          relval campaign
        """
        cache_key = 'parse_query_%s' % (query)
        if self.__cache.has(cache_key):
            result = self.__cache.get(cache_key)
            logging.info('Found result in cache for key: %s' % cache_key)
            return result

        allowed_characters = ('abcdefghijklmnopqrstuvwxyz'
                              'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                              '0123456789-_*')
        query_allowed_characters = ''.join(
            [x for x in query if x in allowed_characters])
        if '*' in query and len(query_allowed_characters.replace('*', '')) < 8:
            # If there are fewer than 8 non-wildcard characters, do not search.
            # This is done to avoid queries like *-*-*
            return None, None

        if '*' in query:
            # Wildcard search
            # Lord have mercy on poor pMp
            if self.search(query, 'requests', page_size=1, max_results=1):
                result = ('prepid:%s' % (query), 'requests')

            elif self.search(query,
                             'rereco_requests',
                             page_size=1,
                             max_results=1):
                result = ('prepid:%s' % (query), 'rereco_requests')

            else:
                result = (None, None)
        else:
            # Exact match
            if self.is_instance(query, 'campaigns', 'campaign'):
                result = ('member_of_campaign:%s' % (query), 'requests')

            elif self.is_instance(query, 'requests', 'request'):
                result = ('prepid:%s' % (query), 'requests')

            elif self.is_instance(query, 'chained_campaigns',
                                  'chained_campaign'):
                result = ('member_of_campaign:%s' % (query),
                          'chained_requests')

            elif self.is_instance(query, 'chained_requests',
                                  'chained_request'):
                result = ('member_of_chain:%s' % (query), 'requests')

            elif self.is_instance(query, 'ppd_tags', 'ppd_tag'):
                result = ('ppd_tags:%s' % (query), 'requests')

            elif self.is_instance(query, 'tags', 'tag'):
                result = ('tags:%s' % (query), 'requests')

            elif self.is_instance(query, 'flows', 'flow'):
                result = ('flown_with:%s' % (query), 'requests')

            elif self.is_instance(query, 'mcm_dataset_names',
                                  'mcm_dataset_name'):
                result = ('dataset_name:%s' % (query), 'requests')

            elif self.is_instance(query, 'mcm_datatiers', 'mcm_datatier'):
                result = ('datatiers:%s AND status:submitted' % (query),
                          'requests')

            elif self.is_instance(query, 'rereco_requests', 'rereco_request'):
                result = ('prepid:%s' % (query), 'rereco_requests')

            elif self.is_instance(query, 'processing_strings',
                                  'processing_string'):
                result = ('processing_string:%s' % (query), 'rereco_requests')

            elif self.is_instance(query, 'rereco_campaigns',
                                  'rereco_campaign'):
                result = ('member_of_campaign:%s' % (query), 'rereco_requests')

            elif self.is_instance(query, 'relval_requests', 'relval_request'):
                result = ('prepid:%s' % (query), 'relval_requests')

            elif self.is_instance(query, 'relval_cmssw_versions',
                                  'relval_cmssw_version'):
                result = ('cmssw_version:%s' % (query), 'relval_requests')

            elif self.is_instance(query, 'relval_campaigns',
                                  'relval_campaign'):
                result = ('member_of_campaign:%s' % (query), 'relval_requests')

            else:
                result = (None, None)

        self.__cache.set(cache_key, result)
        return result

    def number_of_completed_events(self, stats_document, output_dataset):
        completed_events = 0
        if stats_document and output_dataset and 'event_number_history' in stats_document:
            for history_record in stats_document['event_number_history']:
                if history_record['dataset'] != output_dataset:
                    continue

                if len(history_record.get('history', [])) == 0:
                    break

                newest_entry = sorted(history_record.get('history', []),
                                      key=lambda k: k['time'])[-1]
                if newest_entry['type'] == 'VALID' or newest_entry[
                        'type'] == 'PRODUCTION':
                    completed_events = newest_entry.get('events', 0)

        return completed_events

    def get_info_for_estimate(self, req):
        """
        Return (tuple) a request name, output dataset and request manager name
        Returns name of request, it's dataset and request manager name that should
        be used to estimate number of completed events of given request
        """
        member_of_chains = req['member_of_chain']
        potential_results_with_events = []
        for member_of_chain in member_of_chains:
            chained_requests = self.search(query='prepid:%s' %
                                           (member_of_chain),
                                           index='chained_requests')

            logging.info('Will look in %s chained requests for %s estimate' %
                         (len(chained_requests), req['prepid']))
            for chained_request in chained_requests:
                chain = chained_request.get('chain', [])
                if req['prepid'] in chain:
                    following_requests = chain[chain.index(req['prepid']) + 1:]
                    for following_request_prepid in following_requests:
                        following_requests = list(
                            self.db_query(following_request_prepid,
                                          include_stats_document=True,
                                          estimate_completed_events=False))

                        if len(following_requests) == 0:
                            continue

                        stats_document, mcm_document = following_requests[0]
                        output_dataset = mcm_document.get('output_dataset')
                        request_manager_name = mcm_document.get('name')
                        request = mcm_document.get('prepid')
                        if output_dataset and request_manager_name:
                            potential_results_with_events.append(
                                (self.number_of_completed_events(
                                    stats_document, output_dataset), request,
                                 output_dataset, request_manager_name))
                            # Find next one that has dataset, no need to iterate over all of the chain
                            break

        potential_results_with_events = sorted(potential_results_with_events,
                                               key=lambda k: k[0])
        if len(potential_results_with_events) > 0:
            best_match = potential_results_with_events[0]
            logging.info(
                'Best match for %s is %s %s %s' %
                (req['prepid'], best_match[1], best_match[2], best_match[3]))
            return best_match[1], best_match[2], best_match[3]

        return None, None, None

    def db_query(self,
                 query,
                 include_stats_document=True,
                 estimate_completed_events=False,
                 skip_prepids=None,
                 request_filter=None):
        """
        Query DB and return array of raw documents
        Tuple of three things is returned: stats document, mcm document
        """

        req_arr = []
        if query in ('submitted', 'submitted-no-nano', 'submitted-all'):
            index = 'requests'
            es_query = 'status:submitted'
            requests = {x['prepid']: x for x in self.search(es_query, index)}
            if query == 'submitted-no-nano':
                requests = {
                    prepid: request
                    for prepid, request in requests.items()
                    if 'nanoaod' not in prepid.lower()
                }
                logging.info('Removed NanoAOD requests')

            if query in ('submitted', 'submitted-no-nano'):
                chained_reqs = set()
                logging.info('Found %s submitted requests', len(requests))
                for _, req in requests.items():
                    chained_reqs.update(req.get('member_of_chain', []))

                logging.info(
                    'Collected %s chained requests from these campaigns',
                    len(chained_reqs))
                chained_reqs = self.es.mget(index='chained_requests',
                                            doc_type='chained_request',
                                            body={'ids':
                                                  list(chained_reqs)})['docs']
                logging.info('Fetched %s chained requests', len(chained_reqs))
                added_reqs = set()
                for chained_req in chained_reqs:
                    chain = chained_req['_source']['chain']
                    for prepid in reversed(chain):
                        req = requests.get(prepid)
                        if not req:
                            continue

                        if prepid not in added_reqs:
                            req_arr.append(req)
                            added_reqs.add(prepid)

                        break

                logging.info('Picked %s requests in these chained requests',
                             len(req_arr))
            else:
                req_arr = [req for _, req in requests.items()]
                logging.info('Taking all %s submitted requests', len(req_arr))
        else:
            es_query, index = self.parse_query(query)
            logging.info('Query: %s, index: %s' % (es_query, index))
            if index is None:
                logging.info(
                    'Returning nothing because index for %s could not be found'
                    % (query))
                return []

            if index == 'chained_requests':
                chained_requests = self.search(es_query, index)
                for chained_request in chained_requests:
                    es_query, index = ('member_of_chain:%s' %
                                       (chained_request.get('prepid')),
                                       'requests')
                    req_arr.extend(self.search(es_query, index))
            else:
                req_arr = self.search(es_query, index)

        if index == 'requests':
            logging.info('Found %d requests for %s' % (len(req_arr), es_query))
        elif index == 'relval_requests':
            logging.info('Found %s RelVal requests from %s' %
                         (len(req_arr), es_query))
        elif index == 'rereco_requests':
            logging.info('Found %d ReReco requests for %s' %
                         (len(req_arr), es_query))

        # Iterate over array and collect details (McM documents)
        if index == 'rereco_requests' or index == 'relval_requests':
            output_dataset_index = -1
        else:
            output_dataset_index = 0

        if skip_prepids is None:
            skip_prepids = set()

        if request_filter:
            logging.info('Requests before request filter %s' % (len(req_arr)))
            req_arr = [req for req in req_arr if request_filter(req)]
            logging.info('Requests after request filter %s' % (len(req_arr)))

        req_mgr_names_set = set()
        req_mgr_names_map = {}
        for req in req_arr:
            if req.get('prepid', '') in skip_prepids:
                logging.info('Skipping %s as it is in skippable prepids list',
                             req.get('prepid', ''))
                continue

            dataset_list = req.get('output_dataset', [])
            if dataset_list is None:
                # For some reason the output dataset can become null
                refetched = self.es.get_source(index='requests',
                                               doc_type='request',
                                               id=req['prepid'])
                logging.info('Refetched %s because output datasets were null',
                             req['prepid'])
                dataset_list = refetched.get('output_dataset', [])

            if len(dataset_list) > 0:
                dataset = dataset_list[output_dataset_index]
            else:
                dataset = None

            if not dataset and estimate_completed_events and index == 'requests' and include_stats_document:
                logging.info('Will try to find closest estimate for %s' %
                             (req['prepid']))
                closest_request, closest_output_dataset, closest_request_manager = self.get_info_for_estimate(
                    req)
                if closest_request and closest_output_dataset and closest_request_manager:
                    logging.info(
                        'Will use %s dataset and %s request manager of %s as an estimate for %s'
                        % (closest_output_dataset, closest_request_manager,
                           closest_request, req['prepid']))
                    dataset = closest_output_dataset
                    req['reqmgr_name'] = [closest_request_manager]
                    req['estimate_from'] = closest_request

            req['force_completed'] = False
            req['expected'] = req['total_events']
            req['output_dataset'] = dataset
            for reqmgr_dict in req.get('reqmgr_status_history', []):
                if 'force-complete' in reqmgr_dict['history']:
                    req['force_completed'] = True
                    break

            # Get time of last transition to "submitted"
            for item in reversed(req['history']):
                if item['action'] == 'submitted':
                    req['submitted_time'] = item['time']
                    break

            # Get the time of the *last* transition to status "done"
            for item in reversed(req['history']):
                if item['action'] == 'done':
                    req['done_time'] = item['time']
                    break

            if not include_stats_document:
                req['reqmgr_name'] = []
            else:
                # Collect all reqmgr_names
                for reqmgr in req['reqmgr_name']:
                    req_mgr_names_set.add(reqmgr)

            if len(req_mgr_names_set) > 10000:
                self.fetch_workflows_into_dictionary(list(req_mgr_names_set),
                                                     req_mgr_names_map)
                req_mgr_names_set = set()

        self.fetch_workflows_into_dictionary(list(req_mgr_names_set),
                                             req_mgr_names_map)
        results_to_return = []
        for res in req_arr:
            for reqmgr in reversed(res['reqmgr_name']):
                if reqmgr in req_mgr_names_map:
                    res['name'] = reqmgr
                    results_to_return.append((req_mgr_names_map[reqmgr], res))
                    break
            else:
                results_to_return.append((None, res))

        return results_to_return

    def fetch_workflows_into_dictionary(self, workflow_ids, result_dictionary):
        """
        Fetch workflows using workflow_ids list of ids and fill result dictionary
        with results where workflow id is a key and it's _source is value
        Skip workflows that are resubmissions
        """
        if len(workflow_ids) == 0:
            return

        logging.info('Will try to get %s workflows' % len(workflow_ids))
        workflows = self.es.mget(index='workflows',
                                 doc_type='workflow',
                                 body={'ids': workflow_ids})['docs']

        found = 0
        for workflow in workflows:
            if workflow['found']:
                workflow_source = workflow['_source']
                if workflow_source.get(
                        'request_type').lower() != 'resubmission':
                    found += 1
                    result_dictionary[workflow['_id']] = workflow['_source']

        logging.info('Got %s workflows' % (found))

    def apply_filters(self, data, priority_filter, pwg_filter,
                      interested_pwg_filter, status_filter):
        """
        Priority filter is an array of min and max priorities
        PWG filter is list of strings (pwg) of requests to include
        Status filter is list of strings (status) of requests to include
        Return the filtered data and dictionaries of pwg and status filters that show whether
        these values were kept (True) or filtered out (False)
        """
        logging.info('Requests before filtering %s' % (len(data)))
        new_data = []
        if pwg_filter is not None:
            pwg_filter = [x.upper() for x in pwg_filter if x]

        if interested_pwg_filter is not None:
            interested_pwg_filter = [
                x.upper() for x in interested_pwg_filter if x
            ]

        if status_filter is not None:
            status_filter = [x.lower() for x in status_filter if x]

        all_pwgs = {}
        all_interested_pwgs = {}
        all_statuses = {}
        for item in data:
            pwg = item.get('pwg', '').upper()
            if pwg not in all_pwgs:
                if pwg_filter is not None:
                    all_pwgs[pwg] = pwg in pwg_filter
                else:
                    all_pwgs[pwg] = True

            interested_pwgs = set(item.get('interested_pwg', []))
            for interested_pwg in interested_pwgs:
                if interested_pwg not in all_interested_pwgs:
                    if interested_pwg_filter is not None:
                        all_interested_pwgs[
                            interested_pwg] = interested_pwg in interested_pwg_filter
                    else:
                        all_interested_pwgs[interested_pwg] = True

            status = item.get('status', '').lower()
            if status not in all_statuses:
                if status_filter is not None:
                    all_statuses[status] = status in status_filter
                else:
                    all_statuses[status] = True

            try:
                priority = int(item.get('priority'))
            except (TypeError, ValueError):
                priority = None

            if priority is not None:
                if priority_filter is not None:
                    lower_priority = priority_filter[0]
                    upper_priority = priority_filter[1]
                    if lower_priority is not None and priority < lower_priority:
                        continue

                    if upper_priority is not None and priority >= upper_priority:
                        continue

            if all_pwgs[pwg] and all_statuses[status]:
                if interested_pwg_filter is None:
                    new_data.append(item)
                else:
                    for interested_pwg in interested_pwgs:
                        if all_interested_pwgs[interested_pwg]:
                            new_data.append(item)
                            break

        logging.info('Requests after filtering %s' % (len(new_data)))
        return new_data, all_pwgs, all_interested_pwgs, all_statuses

    def get_priority_block(self, priority):
        """
        Return priority block for given priority
        """
        priority = int(priority)
        if priority >= 130000:
            return 0

        if priority >= 110000:
            return 1

        if priority >= 90000:
            return 2

        if priority >= 85000:
            return 3

        if priority >= 80000:
            return 4

        if priority >= 70000:
            return 5

        if priority >= 63000:
            return 6

        return 7
Example #14
class SuggestionsAPI(esadapter.InitConnection):
    """
    Used to search in elastic index for similar PrepIDs as given
    """

    __cache = SimpleCache(threshold=config.CACHE_SIZE,
                          default_timeout=config.CACHE_TIMEOUT)

    def __init__(self, typeof):
        esadapter.InitConnection.__init__(self)
        self.max_results_in_index = 200
        self.max_suggestions = 200
        self.present = (typeof == 'present')
        self.historical = (typeof == 'historical')
        self.performance = (typeof == 'performance')

    def get(self, query):
        """
        Get suggestions for the query
        Order:
          campaign
          request
          chained campaign
          chained request
          ppd tag
          tag
          flow
          mcm dataset name
          rereco request
          rereco processing string
          rereco campaign
          relval request
          relval cmssw version
          relval campaign
        """
        query = query.replace(' ', '*')
        cache_key = 'suggestions_%s' % (query)
        if self.__cache.has(cache_key):
            results = self.__cache.get(cache_key)
            if len(results) > 0:
                logging.info('Found %s suggestions in cache for %s' %
                             (len(results), cache_key))
                return json.dumps({'results': results})

        search = 'prepid:*%s*' % (query)

        results = []
        suggestion_queries = [
            {
                'type': 'RERECO CAMPAIGN',
                'index': 'rereco_campaigns'
            },
            {
                'type': 'CAMPAIGN',
                'index': 'campaigns'
            },
            # {'type': 'REQUEST', 'index': 'requests'},
            # {'type': 'CHAINED CAMPAIGN', 'index': 'chained_campaigns'},
            # {'type': 'CHAINED REQUEST', 'index': 'chained_requests'},
            {
                'type': 'PPD TAG',
                'index': 'ppd_tags'
            },
            {
                'type': 'TAG',
                'index': 'tags'
            },
            # {'type': 'FLOW', 'index': 'flows'},
            # {'type': 'MCM DATASET', 'index': 'mcm_dataset_names'},
            {
                'type': 'DATATIER',
                'index': 'mcm_datatiers'
            },
            # {'type': 'RERECO', 'index': 'rereco_requests'},
            {
                'type': 'PROCESSING STRING',
                'index': 'processing_strings'
            },
            # {'type': 'RELVAL', 'index': 'relval_requests'},
            # {'type': 'RELVAL CMSSW', 'index': 'relval_cmssw_versions'},
            # {'type': 'RELVAL CAMPAIGN', 'index': 'relval_campaigns'}]
        ]

        for suggestion_query in suggestion_queries:
            suggestion_results = [
                x['_id']
                for x in self.search(search,
                                     suggestion_query['index'],
                                     self.max_suggestions,
                                     self.max_suggestions)
            ]
            suggestion_query['all_suggestions'] = [
                {'type': suggestion_query['type'], 'label': x}
                for x in suggestion_results
            ]
            suggestion_query['selected_suggestions'] = []

        used_suggestions = set()
        for i in range(self.max_suggestions):
            for suggestion_query in suggestion_queries:
                if i < len(suggestion_query.get('all_suggestions', [])):
                    suggestion = suggestion_query['all_suggestions'][i]
                    if suggestion['label'] not in used_suggestions:
                        suggestion_query['selected_suggestions'].append(
                            suggestion)
                        used_suggestions.add(suggestion['label'])

                if len(used_suggestions) >= self.max_suggestions:
                    break

            if len(used_suggestions) >= self.max_suggestions:
                break

        selected_results = []
        for suggestion_query in suggestion_queries:
            selected_results.extend(
                sorted(suggestion_query.get('selected_suggestions'),
                       key=lambda x: x['label']))

        results = selected_results
        logging.info('Found %s suggestions for %s' % (len(results), search))

        self.__cache.set(cache_key, results)
        return json.dumps({'results': results})
Example #15
def setup_caching(app):
    app.cache = SimpleCache()


UnsubscribeConfirmation = 'UnsubscribeConfirmation'  # orphaned member of the SNSMessageType enum used below


class InvalidMessageTypeException(Exception):
    pass


def verify_message_type(message_type: str):
    try:
        SNSMessageType(message_type)
    except ValueError:
        raise InvalidMessageTypeException(
            f'{message_type} is not a valid message type.')


certificate_cache = SimpleCache()


def get_certificate(url):
    res = certificate_cache.get(url)
    if res is not None:
        return res
    res = requests.get(url).content
    certificate_cache.set(url, res, timeout=60 * 60)  # 60 minutes
    return res


# 400 counts as a permanent failure so SNS will not retry.
# 500 counts as a failed delivery attempt so SNS will retry.
# See https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html#DeliveryPolicies
# This should not be here, it used to be in notifications/notifications_ses_callback. It then
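A minimal sketch of the behaviour those comments describe, assuming a Flask app object, a request import and json are available, and using a hypothetical route path; this is not the original callback implementation:

@app.route('/notifications/sns', methods=['POST'])
def sns_callback():
    message_type = request.headers.get('x-amz-sns-message-type', '')
    try:
        verify_message_type(message_type)
    except InvalidMessageTypeException:
        return 'invalid message type', 400  # permanent failure: SNS will not retry
    try:
        message = json.loads(request.data)
        certificate = get_certificate(message['SigningCertURL'])  # signature verification would use this
    except Exception:
        return 'temporary error', 500  # failed delivery attempt: SNS will retry
    return 'ok', 200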
Example #17
import json
import logging
import math
from random import randint
import re
import time

from google.appengine.api import memcache, urlfetch
from google.appengine.ext import ndb

from werkzeug.contrib.cache import SimpleCache

from roger import config

# used in local thread
_local_cache = SimpleCache(threshold=500, default_timeout=300)


def _cache_get(key):
    data = _local_cache.get(key)
    if data:
        return data
    return memcache.get(key)


def _cache_set(key, value, ttl):
    _local_cache.set(key, value, ttl)
    memcache.set(key, value, time=ttl)


API_BASE = 'https://maps.googleapis.com/maps/api'
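The two helpers above layer a per-process SimpleCache in front of the shared App Engine memcache. A sketch of the intended read-through usage, where the function name, cache key and TTL are illustrative assumptions rather than code from the original module:

def geocode(address):
    key = 'geocode:%s' % address
    result = _cache_get(key)
    if result is None:
        # URL-encoding of the address is omitted in this sketch.
        url = '%s/geocode/json?address=%s' % (API_BASE, address)
        result = json.loads(urlfetch.fetch(url).content)
        _cache_set(key, result, ttl=600)  # keep for 10 minutes in both cache tiers
    return result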
Example #18
def build_resolver(configuration_file):
    """

    :param configuration_file:
    :return: Organizer, Resolver and Cache handler
    """
    with open(configuration_file) as f:
        xml = etree.parse(f)

    directories = [
        # Compute path relative to the configuration files
        relative_folder(configuration_file, directory)
        for directory in xml.xpath("//corpora/corpus/text()")
    ]
    default_collection = None
    general_collection = CtsTextInventoryCollection()
    filters_to_register = []

    for collection in xml.xpath("//collections/collection"):
        identifier = collection.xpath("./identifier/text()")[0]
        if collection.get("default") == "true":
            default_collection = identifier

        current_collection = CtsTextInventoryMetadata(
            identifier, parent=general_collection)
        for name in collection.xpath("./name"):
            current_collection.set_label(name.text, name.get("lang"))

        prefix_filters = []
        citation_filters = []
        directory_filters = []

        # We look at dispatching filters in the collection
        for filters in collection.xpath("./filters"):
            # We register prefix filters
            for prefix in filters.xpath("./id-starts-with/text()"):
                prefix_filters.append(
                    lambda collection, starts_with=prefix: str(
                        collection.id).startswith(starts_with))

            # We register citation filters
            for citation_name in filters.xpath("./citation-contains/text()"):
                citation_filters.append(
                    lambda collection, citation_system=citation_name:
                    citation_contain_filter(collection, citation_system))

            # We register path based filters
            for target_directory in filters.xpath("./folder/text()"):
                current_folder = relative_folder(configuration_file,
                                                 target_directory)
                directory_filters.append(
                    lambda collection, path=None, starts_with=current_folder:
                    path.startswith(starts_with))

        if prefix_filters or citation_filters or directory_filters:
            filters_to_register += [
                (identifier,
                 collection_dispatcher_builder(prefix_filters,
                                               citation_filters,
                                               directory_filters))
            ]

    # Create the dispatcher
    organizer = CollectionDispatcher(general_collection,
                                     default_inventory_name=default_collection)

    for destination_collection, anonymous_dispatching_function in filters_to_register:
        organizer.add(anonymous_dispatching_function, destination_collection)

    # Set-up the cache folder
    # TODO: add a system for Redis?
    cache = None
    for cache_folder in xml.xpath("//cache-folder/text()"):
        cache = FileSystemCache(cache_folder)
    if cache is None:
        cache = SimpleCache()

    resolver = NautilusCTSResolver(resource=directories,
                                   dispatcher=organizer,
                                   cache=cache)

    return organizer, resolver, cache
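A brief usage sketch; the configuration path below is hypothetical and not taken from the original code:

organizer, resolver, cache = build_resolver("corpora/capitains.xml")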
Example #19
from flask import *
from pdb import set_trace
from werkzeug import secure_filename
import os
from sift import *
from werkzeug.contrib.cache import SimpleCache
import pdb
import pickle  # needed for the descriptor files loaded below
app = Flask(__name__, static_url_path="")
UPLOAD_FOLDER = './static/img'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# ---caching---
cache = SimpleCache(threshold=2000, default_timeout=3000)
indexed_images_folder = 'indexed_images/'
cache_list = {}

for i in range(1, 1000):
    file = os.path.join(indexed_images_folder, str(i) + '.txt')
    with open(file, "rb") as f:
        db_descriptor, db_image_path = pickle.load(f)

    cache_list[db_image_path] = db_descriptor

cache.set('db_images', cache_list)
del cache_list

# ---end caching---


@app.route("/")
def hello():
    return render_template('home.html', user_image="/a.jpg")
Example #20
import base64
import os
import time
import urllib.parse
import json
from werkzeug.contrib.cache import SimpleCache

from config import SPOTIFY_REDIRECT_URI, SPOTIFY_CLIENT_SECRET, SPOTIFY_CLIENT_ID
from common import call

simpleCacheObject = SimpleCache()

SPOTIFY_SCOPE = "user-read-private user-follow-read user-top-read"
SPOTIFY_API_URL = 'https://api.spotify.com/v1/'

OAUTH_AUTHORIZE_URL = 'https://accounts.spotify.com/authorize/'
OAUTH_TOKEN_URL = 'https://accounts.spotify.com/api/token/'


class SpotifyException(Exception):
    pass


class Spotify:
    def __init__(self):
        self.client_id = SPOTIFY_CLIENT_ID
        self.client_secret = SPOTIFY_CLIENT_SECRET
        self.redirect_uri = SPOTIFY_REDIRECT_URI
        self.scope = SPOTIFY_SCOPE
        self.api_url = SPOTIFY_API_URL
        self.authorisation_url = OAUTH_AUTHORIZE_URL
Example #21
"""
@author: zhanghe
@software: PyCharm
@file: views.py
@time: 16-1-7 12:10 AM
"""

from app import app, login_manager, oauth_github, oauth_qq, oauth_weibo, send_cloud_client, qi_niu_client
from flask import render_template, request, url_for, send_from_directory, session, flash, redirect, g, jsonify, Markup, abort
from app.forms import RegForm, LoginForm, BlogAddForm, BlogEditForm, UserForm
from app.login import LoginUser
from flask_login import login_user, logout_user, current_user, login_required
import os
import json
from werkzeug.contrib.cache import SimpleCache
cache = SimpleCache()  # By default at most 500 keys and a 5-minute timeout; both are configurable.
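# Hypothetical illustration (not in the original file) of overriding those defaults:
#     cache = SimpleCache(threshold=1000, default_timeout=60 * 10)  # 1000 keys, 10 minutes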


@login_manager.user_loader
def load_user(user_id):
    """
    如果 user_id 无效,它应该返回 None ( 而不是抛出异常 )。
    :param user_id:
    :return:
    """
    return LoginUser.query.get(int(user_id))


@app.before_request
def before_request():
    """
Example #22
 def __init__(self):
     """Initialize the cache."""
     super(ImageSimpleCache, self).__init__()
     self.cache = SimpleCache()
Example #23
    def deletes(item):
        if item[0] == 'memoize' and item[1] in keys:
            cache.delete(item)
            return True
        return False

    _memoized[:] = [x for x in _memoized if not deletes(x)]


#: the cache system factories.
CACHE_SYSTEMS = {
    'null': lambda: NullCache(),
    # SimpleCache's first positional argument is the key threshold, not the timeout,
    # so the timeout is passed by keyword here.
    'simple': lambda: SimpleCache(default_timeout=ctx.cfg['caching.timeout']),
    'memcached': lambda: MemcachedCache(
        [x.strip() for x in ctx.cfg['caching.memcached_servers'].split(',')],
        ctx.cfg['caching.timeout']),
    'filesystem': lambda: FileSystemCache(
        join(ctx.cfg['caching.filesystem_cache_path']),
        threshold=500,
        default_timeout=ctx.cfg['caching.timeout']),
    'database': lambda: DatabaseCache(ctx.cfg['caching.timeout']),
    'gaememcached': lambda: GAEMemcachedCache(ctx.cfg['caching.timeout'])
}
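A minimal sketch of how a factory from this mapping would be selected at start-up, assuming a 'caching.system' configuration key that is not shown in the snippet:

def create_configured_cache():
    factory = CACHE_SYSTEMS.get(ctx.cfg['caching.system'], CACHE_SYSTEMS['null'])
    return factory()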

Example #24
from flask import Flask, g, render_template, request, jsonify, url_for, Response
import chess, chess.uci
import time
from contextlib import closing
from werkzeug.contrib.cache import SimpleCache
import os
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import desc, func
import json
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
db = SQLAlchemy(app)
app.cache = SimpleCache()

import models


#DATABASE FUNCTIONS
def init_db():
    db.create_all()


def create_new_game():

    print "Creating new game!"
    game = models.Games(int(time.time()))
    db.session.add(game)
    db.session.commit()
    g = get_last_game()
    start_pos = chess.STARTING_FEN
    update_pos_db(g, start_pos, 'None')
Example #25
import requests
from flask import current_app as app, url_for
from mhn.ui import constants
from config import MHN_SERVER_HOME
import os
from werkzeug.contrib.cache import SimpleCache
from ipaddress import ip_address
import struct
from mhn.api.models import Sensor

flag_cache = SimpleCache(threshold=1000, default_timeout=300)
country_cache = SimpleCache(threshold=1000, default_timeout=300)
sensor_cache = SimpleCache(threshold=1000, default_timeout=300)


def is_private_addr(ip):
    # 10.0.0.0/8
    # 127.0.0.0/8
    # 172.16.0.0/12
    # 192.168.0.0/16
    # fc00::/7 (unique local addresses)
    # ::1/128 (localhost)

    try:
        ip_obj = ip_address(ip)
        # Make exception for ::ffff/96 (ipv4-mapped)
        if ip_obj.version == 6 and ip_obj.ipv4_mapped:
            return False
        if ip_obj.is_private:
            return True
    except Exception as e:
Example #26
 def setUp(self):
     self.patcher = patch("flask_ask.core.find_ask", return_value=Ask())
     self.ask = self.patcher.start()
     self.user_id = "dave"
     self.token = "123-abc"
     self.cache = SimpleCache()
Example #27
import os
import json

# Imports assumed for the names used below; they are not part of the original snippet.
from flask import Flask
from redis import StrictRedis
from rq import Queue
from werkzeug.contrib.cache import SimpleCache

DB_HOST = os.environ.get("DB_HOST", "redis://cache")
DB_PORT = os.environ.get("DB_PORT", 6379)
MAX_CONNECTION = os.environ.get("MAX_CONNECTION")
CACHE_SIZE = os.environ.get("CACHE_SIZE", 2)
LOCAL_EXPIRE = os.environ.get("CACHE_EXPIRY", 0.5)  # minutes
POP_SIZE = 1
CACHE_KEYS = "LRU-KEYS"  # Used as a purpose to hold ranks of each keys
CACHE_STORE = "LRU-STORE"  # Used to store key-value pair

app = Flask(__name__)

cache = SimpleCache()

redis_db = StrictRedis(
    host=DB_HOST,
    port=DB_PORT,
    decode_responses=True,
)
q = Queue(connection=redis_db)
pipeline_object = redis_db.pipeline()


def run():
    app.run(debug=True)


def set_item(key, value):
Example #28
import settings
from flask import Flask, session, redirect, url_for, request, send_from_directory, render_template
from views.dashboard import dashboard
from views.scrapyd import scrapyd, fetch_scrapyd_agent
from views.user import user

app = Flask(__name__)
app.config.from_object(settings.DevelopmentConfig)
app.secret_key = os.urandom(24)

# init global cache
if app.config['ENABLE_CACHE']:
    from werkzeug.contrib.cache import SimpleCache

    app.config['GLOBAL_CACHE'] = SimpleCache()

# register scrapyd agent
fetch_scrapyd_agent(app.config['SCRAPYD_URL'])

app.register_blueprint(dashboard, url_prefix='/supervisor/dashboard')
app.register_blueprint(user, url_prefix='/supervisor/user')
app.register_blueprint(scrapyd, url_prefix='/supervisor/scrapyd')


@app.route('/favicon.ico')
def favicon():
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico',
                               mimetype='image/vnd.microsoft.icon')
Example #29
def simple(app, args, kwargs):
    kwargs.update(dict(threshold=app.config['CACHE_THRESHOLD']))
    return SimpleCache(*args, **kwargs)
Example #30
 def __simple_cache(self):
     from werkzeug.contrib.cache import SimpleCache
     return SimpleCache()