Example #1
class Store(SessionStore):

    def __init__(self, session_class=None):
        super(Store, self).__init__(session_class)
        if settings.DATABASE_ENGINE == 'gae':
            self.cache = GAEMemcachedCache(default_timeout=0)
        else:
            servers = settings.SESSION_OPTIONS.get('memcached_servers', [])
            self.cache = MemcachedCache(servers, default_timeout=0)

    def save(self, session):
        self.cache.set(session.sid, dict(session))

    def delete(self, session):
        self.cache.delete(session.sid)

    def get(self, sid):
        if not self.is_valid_key(sid):
            return self.session_class.new()
        try:
            data = self.cache.get(sid)
        except Exception:
            data = None
        if data is None:
            # cache miss or cache failure: start with an empty session
            data = {}
        return self.session_class(data, sid, False)

    def list(self):
        return self.cache.get_dict().keys()
Example #2
File: ninja.py Project: zinw/l2py
class MemcachedSessionStore(SessionStore):
    """A session store that saves sessions with memcache.
    :param session_class: The session class to use.  Defaults to
                          :class:`Session`.
    :param servers: a list or tuple of server addresses or a compatible client.
                    Defaults to `['127.0.0.1:11211']`.
    :param default_timeout: the default timeout that is used if no timeout is specified.
                            A timeout of 0 indicates that the cache never expires.
    :param key_prefix: a prefix that is added before all keys.  This makes it
                       possible to use the same memcached server for different
                       applications.
    """
    def __init__(self,
                 session_class=None,
                 servers=None,
                 default_timeout=600,
                 key_prefix=None):
        SessionStore.__init__(self, session_class)
        self.mc = MemcachedCache(servers, default_timeout, key_prefix)

    def get(self, key):
        return self.mc.get(key)

    def set(self, key, value, timeout=None):
        self.mc.set(key, value, timeout)

    def get_session(self, sid):
        data = self.get(sid)
        if data is None:
            data = {}
        return self.session_class(dict(data), sid, False)
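
A minimal usage sketch (assumptions: a memcached server on 127.0.0.1:11211, and werkzeug's SessionStore base class, whose new() helper generates a fresh sid):

store = MemcachedSessionStore(servers=['127.0.0.1:11211'], key_prefix='myapp')
session = store.new()                    # inherited from SessionStore
session['user_id'] = 42
store.set(session.sid, dict(session))    # persists for the default 600 s
restored = store.get_session(session.sid)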
Example #3
import hashlib
import pickle
from functools import wraps

# The enclosing decorator was clipped in the original snippet; the wrapper
# name `cache_response` and the functools.wraps usage are reconstructions.
def cache_response(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        cache = MemcachedCache(['127.0.0.1:11211'])
        # key the cache on the URL (first positional arg) plus all args
        key = '{url}{data}'.format(url=args[0], data=pickle.dumps(args))
        hash_ = hashlib.md5(key.encode()).hexdigest()
        if not cache.has(hash_):
            status_code, cached_data = f(*args, **kwargs)
            if not kwargs.get('nocache', False) and status_code == 200:
                cache.set(hash_, (status_code, cached_data), timeout=5 * 60)
        else:
            status_code, cached_data = cache.get(hash_)
        return status_code, cached_data

    return decorated_function
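
A hypothetical application of the decorator, assuming a fetch_page function built on the requests library (neither name appears in the original snippet); the wrapped function must return a (status_code, data) pair, and the URL must be the first positional argument:

import requests

@cache_response
def fetch_page(url):
    resp = requests.get(url)
    return resp.status_code, resp.text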
Example #4
def get_sfc_uuid():
    """Retrieves a unique identifier in order to compose a SFC

        Retrieves a uuid4 identifier to compose a SFC and get a copy of the vnffg template.
    :return: a unique identifier str.
    """

    vnffgd = deepcopy(vnffgd_template)
    sfc_uuid = str(uuid.uuid4())

    cache = MemcachedCache()
    cache.set(sfc_uuid, vnffgd)

    return jsonify({'sfc_uuid': sfc_uuid})
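
From a client's perspective this Flask view would be consumed roughly as follows; the route path and host are assumptions, since the snippet does not show how the view is registered:

import requests

resp = requests.get('http://localhost:5000/sfc/uuid')   # hypothetical route
sfc_uuid = resp.json()['sfc_uuid']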
Example #5
class Cache:
    def __init__(self):
        if webapp.config['APP_ENV'] == 'dev':
            from werkzeug.contrib.cache import SimpleCache
            self.cache = SimpleCache()
        else:
            from werkzeug.contrib.cache import MemcachedCache
            self.cache = MemcachedCache(['127.0.0.1:11211'])

    def get(self, cache_key=''):
        if 'cache' in request.args and request.args.get('cache') == 'clear':
            user_data = session.get('_user', None)
            if user_data and user_data['is_admin']:
                return None
        return self.cache.get(cache_key)

    def set(self, cache_key='', data=None, timeout=1000):
        self.cache.set(cache_key, data, timeout)
        return True
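
A sketch of the get-or-compute pattern this wrapper supports; compute_dashboard_stats and the cache key are hypothetical:

cache = Cache()
stats = cache.get('dashboard:stats')
if stats is None:
    stats = compute_dashboard_stats()   # hypothetical expensive call
    cache.set('dashboard:stats', stats, timeout=300)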
Example #6
def include_sfc_acl():
    """Includes ACL criteria in VNFFGD

    JSON arguments are:
        - sfc_uuid: the unique identifier of the SFC being composed
        - acl: a dict containing the acl criteria to be added into the vnffgd template

    :return: OK if success, or ERROR and its reason if not
    """
    vnffgd = None

    cache = MemcachedCache()

    if 'sfc_uuid' in request.json:
        vnffgd = cache.get(request.json['sfc_uuid'])

    if not vnffgd:
        return jsonify({'status': ERROR, 'reason': 'SFC UUID not found!'})

    acl = request.json['acl']
    topology_template = vnffgd['vnffgd']['template']['vnffgd']['topology_template']

    criteria = topology_template['node_templates']['Forwarding_path1'] \
                                ['properties']['policy']['criteria']

    res, acl = acl_criteria_parser(acl)

    if res != OK:
        return jsonify({'status': ERROR, 'reason': acl})

    for rule in acl:
        criteria.append(rule)

    # debug
    logger.debug('VNFFGD Template UUID: %s\n%s', request.json['sfc_uuid'], json.dumps(vnffgd, indent=4, sort_keys=True))

    cache.set(request.json['sfc_uuid'], vnffgd)

    return jsonify({'status': OK})
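
A hypothetical client call for this view, reconstructed from the keys it reads (sfc_uuid and acl); the route and the criteria names inside acl are assumptions that depend on acl_criteria_parser:

import requests

payload = {
    'sfc_uuid': sfc_uuid,   # obtained from the get_sfc_uuid step above
    'acl': {'ip_proto': 6, 'destination_port_range': '80-80'},   # hypothetical criteria
}
resp = requests.post('http://localhost:5000/sfc/acl', json=payload)   # hypothetical route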
Example #7
class RepoCache:
    def __init__(self, timeout=18000, version=None):
        cache_host = os.environ.get('CACHE_HOST')
        cache_port = os.environ.get('CACHE_PORT')
        # cache_username = os.environ.get('CACHE_USERNAME')
        # cache_password = os.environ.get('CACHE_PASSWORD')

        self._cache = MemcachedCache(['{}:{}'.format(cache_host, cache_port)])
        # self._cache = bmemcached.Client(
        # [cache_host], username=cache_username, password=cache_password)

        self._timeout = timeout
        self._version = version

    def get_cache(self, key):
        return self._cache.get(key)

    def set_cache(self, key, value):
        return self._cache.set(key, value, self._timeout)

    def delete_cache(self, key):
        # werkzeug's MemcachedCache.delete() takes only the key; it has no
        # per-key version argument
        return self._cache.delete(key)
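
A usage sketch, assuming CACHE_HOST and CACHE_PORT point at a reachable memcached instance:

import os

os.environ.setdefault('CACHE_HOST', '127.0.0.1')   # assumed local memcached
os.environ.setdefault('CACHE_PORT', '11211')

repo_cache = RepoCache(timeout=3600)
repo_cache.set_cache('repo:stars', 1234)
print(repo_cache.get_cache('repo:stars'))   # -> 1234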
Example #8
File: init.py Project: lwpie/nba
import json
import os

from werkzeug.contrib.cache import MemcachedCache

cache = MemcachedCache(['127.0.0.1:11211'])

data_path = r'/var/www/nba/api/data/'

# codes index
codes = {}

resource_file = os.path.join(data_path, 'teams.json')
with open(resource_file, 'r', encoding='utf-8') as f:
    data = json.load(f)
for team in sum(data.values(), []):
    codes[team['code']] = team['name']

cache.set('codes', codes, timeout=2592000)

# news
resource_file = os.path.join(data_path, 'news.json')
with open(resource_file, 'r', encoding='utf-8') as f:
    data = json.load(f)
index = data['index']
teams = data['teams']
news = data['news']

cache.set('index', index, timeout=2592000)
cache.set('teams', teams, timeout=2592000)
cache.set('news_l', len(news), timeout=2592000)
cache.set('news', news, timeout=2592000)

# speed up by dividing into smaller chunks
chunk = 100
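
The snippet is truncated here; a plausible continuation, consistent with the chunk size and the news_l key above, would cache the news list in slices (a reconstruction, not the project's actual code):

for i in range(0, len(news), chunk):
    # hypothetical key scheme: news_0, news_1, ... one key per 100 items
    cache.set('news_{}'.format(i // chunk), news[i:i + chunk], timeout=2592000)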
Example #9
    print section
    start = time.time()

    series_list = [p[1] for p in series[0]["points"]]
    from sys import getsizeof

    print "size of series data", getsizeof(series)
    print "size of series list", getsizeof(series_list)

    duration(section, start)

    section = "cache:: store series_list"
    print section
    start = time.time()

    cache.set("influxdb_list_series", series_list, 60 * 20)
    duration(section, start)

    if cfg.get("cheat_times"):
        print "cheating on start/end time. no further influx queries needed..."
        continue

    section = "influxdb:: select * from // order asc limit 1"
    print section
    start = time.time()
    series = client.query("select * from // order asc limit 1")
    duration(section, start)

    section = "cache:: store first-point for all series"
    print section
    start = time.time()
Example #10
class Memcache:
    WAIT_ASK = 1
    WAIT_ANSWER = 2

    def __init__(self):
        self.cache = MemcachedCache(['127.0.0.1:11211'])

    def get(self, key):
        ret = self.cache.get(key)
        try:
            ret = ret.decode('utf-8')
        except (AttributeError, UnicodeError):
            # ret is None, an int, or not valid utf-8: return it unchanged
            pass
        return ret

    def set(self, key, value):
        try:
            value = value.encode('utf-8')
        except (AttributeError, UnicodeError):
            # non-string values (e.g. the WAIT_* ints) are stored as-is
            pass
        return self.cache.set(key, value, timeout=600)

    def delete(self, key):
        return self.cache.delete(key)

    def check_history(self, qid, gid=0):
        r = self.get(u'H' + str(qid) + u'G' + str(gid))
        if r is None:
            return r, r
        if r == self.WAIT_ANSWER:
            return self.WAIT_ANSWER, self.get(u'A' + str(qid) + u'G' +
                                              str(gid))
        if r == self.WAIT_ASK:
            return self.WAIT_ASK, None
        return None

    def set_before_ask(self, qid, gid=0):
        self.set(u'H' + str(qid) + u'G' + str(gid), self.WAIT_ASK)

    def set_before_answer(self, qid, ask, gid=0):
        self.set(u'A' + str(qid) + u'G' + str(gid), ask)
        self.set(u'H' + str(qid) + u'G' + str(gid), self.WAIT_ANSWER)

    def clear_state(self, qid, gid=0):
        self.delete(u'A' + str(qid) + u'G' + str(gid))
        self.delete(u'H' + str(qid) + u'G' + str(gid))

    def last_warn_time(self, qid):
        self.set(u'W' + str(qid), time.time())

    def check_last_chat_same(self, qid, msg):
        if self.get(u'LC' + str(qid)) and self.get(u'LC' + str(qid)) == msg:
            return self.last_chat_count(qid, add=True)
        self.set(u'LC' + str(qid), msg)
        self.set(u'LCC' + str(qid), 1)
        return True

    def last_chat_count(self, qid, add=False):
        lcc = self.get(u'LCC' + str(qid))
        if not lcc:
            self.set(u'LCC' + str(qid), 1)
            return True
        if add and lcc <= AdvancedSettings.same_response_limit:
            self.cache.inc(u'LCC' + str(qid))
            lcc += 1
        return lcc < AdvancedSettings.same_response_limit

    def check_block_user(self, qid):
        x = self.get(u'BU' + str(qid))
        if x is not None:
            return x
        db = MyDB()
        is_blocked = db.check_blocked(qid, 'user')
        logging.info("is_blocked")
        logging.info(is_blocked)
        self.set(u'BU' + str(qid), is_blocked)
        return is_blocked

    def check_block_group(self, gid):
        x = self.get(u'BG' + str(gid))
        if x is not None:
            return x
        db = MyDB()
        is_blocked = db.check_blocked(gid, 'group')
        self.set(u'BG' + str(gid), is_blocked)
        return is_blocked

    def check_disable_group(self, gid):
        x = self.get(u'BD' + str(gid))
        if x is not None:
            return x
        db = MyDB()
        is_disabled = db.check_disabled(gid)
        self.set(u'BD' + str(gid), is_disabled)
        return is_disabled
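
A sketch of the ask/answer state machine these helpers implement; the qid value is hypothetical:

mc = Memcache()

mc.set_before_ask(qid=12345)
state, answer = mc.check_history(12345)    # (Memcache.WAIT_ASK, None)

mc.set_before_answer(12345, 'what is 2+2?')
state, answer = mc.check_history(12345)    # (Memcache.WAIT_ANSWER, 'what is 2+2?')

mc.clear_state(12345)
state, answer = mc.check_history(12345)    # (None, None)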
Example #11
def include_sfc_traffic_origin():
    """Includes ACL criteria according to INTERNAL or EXTERNAL traffic source

    INTERNAL traffic is sourced from VNFs managed by NFVO, while EXTERNAL traffic is sourced from everything
    out from NFVO networks.
    This function also includes specific requirements to select the source port for any NFVO.
    Currently, it just supports Tacker NFVO.
    Tacker has the requirement for 'network_source_port_id' in ACL criteria, which is included  in VNFFGD
    by this function.
    One important rule is applied:
        1. Tacker's network_name from the origin VNF CP must be the same as the input CP of the first VNF in the chain.
           If there are more CPs than 1, then a message with status OPTIONS and a cp_list is replied in order to the
           user inform a desirable connection point.

    JSON input arguments are:
        - sfc_uuid: the unique identifier to the SFC being composed
        - origin: if the SFC traffic source is INTERNAL or EXTERNAL
        - vnf_id: the VNF unique identifier from the NFVO
        - resource: optional when using INTERNAL origin. Identifies the user manually input of the cp_out

    :return: OK if success, or ERROR and its reason if not, or OPTIONS and a cp_list dict
    """
    vnffgd = None

    cache = MemcachedCache()

    if 'sfc_uuid' in request.json:
        vnffgd = cache.get(request.json['sfc_uuid'])

    if not vnffgd:
        return jsonify({'status': ERROR, 'reason': 'SFC UUID not found!'})

    database = DatabaseConnection()

    origin = request.json['origin']

    topology_template = vnffgd['vnffgd']['template']['vnffgd']['topology_template']
    # sfp = service function path
    sfp_cps = topology_template['groups']['VNFFG1']['properties']['connection_point']
    sfp_vnfs = topology_template['groups']['VNFFG1']['properties']['constituent_vnfs']
    # network_src_port_id is a requirement for Tacker NFVO
    criteria = topology_template['node_templates']['Forwarding_path1'] \
                                ['properties']['policy']['criteria']

    _, catalog = database.list_catalog(vnfd_name=sfp_vnfs[0])
    sfp_first_pkg_id = catalog[0]['_id']
    sfp_first_vnf_cps = list_vnf_pkg_cps(sfp_first_pkg_id, internal=True)

    if origin == INTERNAL:
        vnf_id = request.json['vnf_id']

        resp, data = database.list_vnf_instances(vnf_id=vnf_id)
        if resp != OK:
            return jsonify({'status': resp, 'reason': data})

        # Only VNFs instantiated by this framework can be used as origin,
        # as we need to get the information of their CPs from the VNF Packages
        if not data:
            return jsonify({'status': ERROR, 'reason': 'The chosen VNF was not instantiated by Holistic-Composer!'})

        vnf_pkg_id = data[0]['vnf_pkg_id']
        vnf_pkg_cps = list_vnf_pkg_cps(vnf_pkg_id, internal=True)

        # Keep only the CPs that are in the same subnet as the first VNF's CP_in of the SFC
        cps = list(vnf_pkg_cps)
        for cp in cps:
            if vnf_pkg_cps[cp]['network_name'] != sfp_first_vnf_cps[sfp_cps[0]]['network_name']:
                vnf_pkg_cps.pop(cp)

        # resource means the CP_out
        if 'resource' not in request.json:
            # Selects the suitable CP_out automatically
            if not vnf_pkg_cps:
                return jsonify({'status': ERROR, 'reason': 'No suitable CP on this VNF!'})

            if len(vnf_pkg_cps) == 1:
                cp_name = list(vnf_pkg_cps.keys())[0]

            else:
                return jsonify({
                    'status': OPTIONS,
                    'cp_list': vnf_pkg_cps
                })

        else:
            cp_name = request.json['resource']

            if cp_name not in vnf_pkg_cps:
                return jsonify({'status': ERROR, 'reason': 'Invalid CP!'})

        response = get_vnf_nfvo_resource_id(vnf_id, cp_name)
        if response['status'] != OK:
            return jsonify(response)

        criteria.append({'network_src_port_id': response['resource_id']})

    elif origin == EXTERNAL:
        resp, data = tacker_agent.get_fip_router_interface_id(
                            sfp_first_vnf_cps[sfp_cps[0]]['network_name'])

        if resp == OK:
            criteria.append({'network_src_port_id': data})

        else:
            return jsonify({
                'status': resp,
                'reason': data
            })

    else:
        return jsonify({
            'status': ERROR,
            'reason': 'Error 500: Network traffic source to SFP should be INTERNAL or EXTERNAL.'
        })

    # debug
    logger.debug('VNFFGD Template UUID: %s\n%s', request.json['sfc_uuid'], json.dumps(vnffgd, indent=4, sort_keys=True))

    cache.set(request.json['sfc_uuid'], vnffgd)

    return jsonify({'status': OK})
Example #12
class Query(object):

    default_configuration = RecursiveDict({
        'query_directories': [],
        'cached': False,
        'execute_queries': True,
        })

    def __init__(self, query, params=None, from_file=True, model_class=None,
                 database='default', sessions=sessions, cached=False,
                 replace=None, config=None, cache_timeout=3600*12):

        # copy the defaults so per-instance updates don't mutate the shared
        # class attribute (assumes deepcopy has been imported from copy)
        self.config = deepcopy(self.default_configuration)
        self.config.update_recursive(configs.get('mysql', {}))
        if config:
            self.config.update_recursive(config)

        # add project path queries directory if not already configured
        project_queries = os.path.join(get_project_path(), 'queries')
        if project_queries not in self.config['query_directories']:
            self.config['query_directories'].append(project_queries)

        if from_file:
            if query in file_queries:
                logger.debug('Getting query file from query files cache %s' %
                             query)
                self.query = file_queries[query]
            else:
                logger.debug('Getting query file %s' % query)
                file_queries[query] = self._get_from_file(query)
                self.query = file_queries[query]
        else:
            self.query = query

        if replace:
            self.query = self.query % replace

        self.params = params
        self.session = sessions.get(database)
        self.result = None
        self.model_class = model_class
        self.cached = cached
        self.query_cache = MemcachedCache(['127.0.0.1:11211'])
        self.cache_timeout = cache_timeout
        self.database = database

    def _gen_mem_key(self):
        # include both parameter names and values so different bind values
        # don't collide on one cache key; always return a key, even for
        # parameterless queries
        params_string = ''
        if self.params:
            for name, value in sorted(self.params.items()):
                params_string += '_%s_%s' % (name, value)
        return str(hash(self.database + '_' + self.query + params_string))

    def _get_from_file(self, query):
        for query_directory in self.config.get('query_directories'):
            for dirpath, dirnames, filenames in os.walk(query_directory,
                                                        followlinks=True):
                for name in filenames:
                    if '%s.sql' % query in os.path.join(dirpath, name):
                        with open(os.path.join(dirpath, name)) as f:
                            return f.read()
        raise QueryFileNotFound('%s.sql not found in any of %s' %
                                (query, self.config.get('query_directories')))

    def _raw_query(self):
        if self.config.get('execute_queries'):
            self.result = self.session.execute(self.query, self.params)
        else:
            self.result = None

    def _raw_query_with_model(self):
        if self.config.get('execute_queries'):
            self.result = self.session.query(self.model_class).from_statement(self.query).params(**self.params)
        else:
            self.result = None

    def _query(self):
        if self.model_class:
            self._raw_query_with_model()
        else:
            self._raw_query()

    def execute(self):
        self._query()
        return self.result

    def one(self):
        cache_key = ''
        if self.cached:
            cache_key = self._gen_mem_key()
            cached_query = self.query_cache.get(cache_key)
            if cached_query:
                return cached_query
        self._query()
        if self.result:
            if self.model_class:
                _one = self.result.one()
                if self.cached:
                    self.query_cache.set(cache_key, _one, self.cache_timeout)
                return _one
            else:
                fetch_one = self.result.fetchone()
                if self.cached:
                    self.query_cache.set(cache_key,
                                         fetch_one, self.cache_timeout)
                return fetch_one
        if self.cached:
            self.query_cache.set(cache_key, self.result, self.cache_timeout)
        return self.result

    def all(self):
        cache_key = ''

        if self.cached:
            cache_key = self._gen_mem_key()
            cached_query = self.query_cache.get(cache_key)
            if cached_query:
                return cached_query
        self._query()
        if self.result:
            if self.model_class:
                _all = self.result.all()
                if self.cached:
                    self.query_cache.set(cache_key, _all, self.cache_timeout)
                return _all
            else:
                fetch_all = self.result.fetchall()
                if self.cached:
                    self.query_cache.set(cache_key, fetch_all,
                                         self.cache_timeout)
                return fetch_all
        if self.cached:
            self.query_cache.set(cache_key, self.result, self.cache_timeout)
        return self.result

    @staticmethod
    def last_insert_id():
        return Query('last_insert_id').one()[0]
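
A usage sketch, assuming a queries/get_user.sql file on the configured query path and a configured 'default' database session; all names are hypothetical:

# loads queries/get_user.sql, binds the params, and caches the row for 12 hours
q = Query('get_user', params={'user_id': '42'}, cached=True)
row = q.one()

# subsequent identical calls are served from memcached until the timeout expires
rows = Query('list_users', cached=True).all()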
Example #13
    print section
    start = time.time()

    series_list = [p[1] for p in series[0]['points']]
    from sys import getsizeof
    print "size of series data", getsizeof(series)
    print "size of series list", getsizeof(series_list)

    duration(section, start)


    section = "cache:: store series_list"
    print section
    start = time.time()

    cache.set("influxdb_list_series", series_list, 60 * 20)
    duration(section, start)

    if cfg.get('cheat_times'):
        print "cheating on start/end time. no further influx queries needed..."
    else:
        section = "influxdb:: select * from // order asc limit 1"
        print section
        start = time.time()
        series = client.query("select * from // order asc limit 1")
        duration(section, start)

        section = "cache:: store first-point for all series"
        print section
        start = time.time()
Example #14
from werkzeug.contrib.cache import MemcachedCache

#def get_my_item():
#	rv = cache.get('my-item')
#	print(rv)
#	if rv is None:
#		rv = calculate_value()
#		cache.set('my-item', rv, timeout=5*60)
#	return rv

if __name__ == "__main__":
    cache = MemcachedCache(['127.0.0.1:11211'])
    rv = cache.get('key')
    if rv is None:
        cache.set('key', 'value')
        print(cache)
        rv = cache.get('key')
    print(rv)
Example #15
            citydetails.name = statedetails.cities[cityid]
            cache.set('city:' + statedetails.abbr + ':' + statedetails.cities[cityid] + ':details', citydetails, )
        allstates.append(statedetails)
        cache.set('states:' + str(id) + ':details', statedetails, )
    cache.set('allstates', allstates, 0)
def allCategories():
    res = db_session.query(Category).all()
    allcats = list()
    for cat in res:
        mycat = Objects.Cat(cat.id, cat.name)
        allcats.append(mycat)
    cache.set('allcats', allcats, )
    

    
def loadCache():
    # Warm up our cache.
    stateCache()
    mystateIds = cache.get('states:ids')
    for id in mystateIds:
        cityCache(id)
    allStatesIndex()
    allCategories()


loadCache()
cache.set('iswarm', 1, timeout=0)
#passhash = Objects.UserPassword('testing').pw_hash
#myUser = User(password=passhash,fname='Mike',lname='Gugino', email='*****@*****.**',phone='555-555')
#db_session.add(myUser)
#db_session.commit()
Example #16
def compose_sfp():
    """Performs VNF Chaining in the VNFFG Template.

    This function stands for VNF Chaining and its requirements for CPs and VLs using the VNFFG Template.
    The first interface is reserved for the VNF management interface, and thus it is not used for VNF chaining.
    The following rules are taken into account:
    - cp_in: chooses the cp_in according to the same network of the prior cp_out. If the VNF is the first one, then
             the first CP is chosen (disregarding the management interface)
    - cp_out: if the given VNF has just one CP for VNF chaining, then cp_out = cp_in. Otherwise,
              cp_out is chosen taking into account NFVO requirements implemented in the related agents.
              If cp_out can not be selected automatically, a message with OPTIONS status is returned
              in order to the user inform the desirable and suitable connection point.

    Input params must be in JSON format, which are:
        - sfc_uuid: the unique identifier for the SFC being composed
        - vnf_pkg_id: always required
        - cp_out: not required, but can be used as a manually user input

    :return: OK if success, or ERROR and its reason if not, or OPTIONS and an cp_list dict
    """
    vnffgd = None

    cache = MemcachedCache()

    if 'sfc_uuid' in request.json:
        vnffgd = cache.get(request.json['sfc_uuid'])

    if not vnffgd:
        return jsonify({'status': ERROR, 'reason': 'SFC UUID not found!'})

    database = DatabaseConnection()

    vnf_pkg_id = request.json['vnf_pkg_id']
    vnf_pkg_cps = list_vnf_pkg_cps(vnf_pkg_id, internal=True)
    _, catalog = database.list_catalog(vnf_pkg_id)
    vnfd_name = catalog[0]['vnfd_name']

    topology_template = vnffgd['vnffgd']['template']['vnffgd']['topology_template']

    # verifying if this vnf package was already added to this VNFFG (no duplicate)
    if vnfd_name in topology_template['groups']['VNFFG1']['properties']['constituent_vnfs']:
        return jsonify({'status': ERROR, 'reason': 'The selected VNF Package was already added on this SFC!'})

    cp_list = sorted(vnf_pkg_cps)
    # we are considering that the first CP is always reserved for the VNF management interface
    # Thus, it is not used for VNF chaining
    cp_list.pop(0)

    # gets all virtual links in VNFFGD
    vnffgd_vls = topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link']

    # getting the previous network_name for correct VNF chaining
    previous_net_name = ''
    if vnffgd_vls:
        previous_vl = vnffgd_vls[-1]  # gets the current last VL in VNFFG

        # gets the current last VNF Name in VNFFGD
        previous_vnfd_name = topology_template['groups']['VNFFG1']['properties']['constituent_vnfs'][-1]
        _, previous_vnf_pkg_id = database.list_catalog(vnfd_name=previous_vnfd_name)
        previous_vnf_pkg_id = previous_vnf_pkg_id[0]['_id']

        # gets all connection points data from previous VNFD
        previous_vnfd_cps = list_vnf_pkg_cps(previous_vnf_pkg_id, internal=True)

        for cp in previous_vnfd_cps:
            if previous_vnfd_cps[cp]['virtual_link'] == previous_vl:
                previous_net_name = previous_vnfd_cps[cp]['network_name']
                break

    cp_in, cp_out = "", ""
    # including cp_input
    for cp in cp_list:
        if vnffgd_vls:  # if there are previous Virtual Links included in VNFFGD

            # cp_in is valid just if it is connected to the same network_name from previous VNF output
            if vnf_pkg_cps[cp]['network_name'] == previous_net_name:
                cp_in = cp
                break

        else:  # if this VNF is the first one being included in VNFFGD
            cp_in = cp
            break

    if not cp_in:
        return jsonify({'status': ERROR, 'reason': 'There is no suitable CP to chaining with previous VNF!'})

    # including cp_output
    num_cps = len(cp_list)
    if num_cps == 1:
        cp_out = cp_in
    else:  # num_cps is > 1 here, since an empty cp_list would have returned ERROR above

        # output CP requirements are dependent of NFVO capabilities, thus it was implemented in the related agent
        result, data = tacker_agent.select_and_validate_cp_out(request.json, vnf_pkg_cps, cp_in)

        if result != OK:
            return jsonify(data)

        cp_out = data

    if cp_in == cp_out:
        capability = [cp_in]
    else:
        capability = [cp_in, cp_out]

    for cp in capability:
        # including connection points
        topology_template['groups']['VNFFG1']['properties']['connection_point'].append(cp)
        # including dependent virtual links
        virtual_link = vnf_pkg_cps[cp]['virtual_link']

        # if virtual_link not in topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link']:
        topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link'].append(virtual_link)

    # including constituent VNFs
    topology_template['groups']['VNFFG1']['properties']['constituent_vnfs'].append(vnfd_name)

    vnf_end_points = len(capability)
    if vnf_end_points == 1:
        capability = capability[0]
    else:
        capability = ','.join(capability)

    # including number of endpoints
    topology_template['groups']['VNFFG1']['properties']['number_of_endpoints'] += vnf_end_points

    path = {"forwarder": vnfd_name, "capability": capability}

    # including VNF forwarding path
    topology_template['node_templates']['Forwarding_path1']['properties']['path'].append(path)

    # debug
    logger.debug('VNFFGD Template UUID: %s\n%s', request.json['sfc_uuid'], json.dumps(vnffgd, indent=4, sort_keys=True))

    cache.set(request.json['sfc_uuid'], vnffgd)

    return jsonify({'status': OK})
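
As with the ACL endpoint above, a hypothetical client-side flow for this view; the route and identifiers are assumptions:

import requests

URL = 'http://localhost:5000/sfc/sfp/compose'   # hypothetical route
body = {'sfc_uuid': sfc_uuid, 'vnf_pkg_id': vnf_pkg_id}   # values from earlier steps
answer = requests.post(URL, json=body).json()

# on an OPTIONS reply, pick a CP from the returned cp_list and resend
# the request with an explicit cp_out
if 'cp_list' in answer:
    body['cp_out'] = sorted(answer['cp_list'])[0]
    answer = requests.post(URL, json=body).json()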