Code Example #1
File: __init__.py  Project: amvtek/PySerializers
def get_pycapnpstuff():
    "return capnp adapted message factories for StuffToTest schema"

    # load the StuffToTest schema (join and PROJECT_ROOT come from the module scope)
    import capnp
    capnp.remove_import_hook()
    schmod = capnp.load(join(PROJECT_ROOT, 'idl/StuffToTest.capnp'))

    # construct the schema container
    schema = SchemaContainer('pycapnp', capnp.__version__)

    context = {}

    for message, fieldnames in FNIDX:

        # load the capnp Struct class
        structClass = getattr(schmod, message)

        # construct a Struct proxy object
        structProxy = CapNpStructProxy(structClass, fieldnames, context)

        # add proxy to schema
        setattr(schema, message, structProxy)

        # update context for it to hold new structProxy reference
        context[message] = structProxy

    return schema
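
A minimal usage sketch for the factory above (the message name `Stuff` is hypothetical; any message listed in FNIDX is exposed the same way):

schema = get_pycapnpstuff()
stuff_proxy = schema.Stuff  # the CapNpStructProxy attached via setattr() above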
Code Example #2
File: test_load.py  Project: brifordwylie/pycapnp
def test_remove_import_hook():
    capnp.add_import_hook([this_dir])
    capnp.remove_import_hook()

    if 'addressbook_capnp' in sys.modules:
        del sys.modules['addressbook_capnp'] # hack to deal with it being imported already

    with pytest.raises(ImportError):
        import addressbook_capnp
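
As a counterpart, re-installing the hook restores the magic *_capnp imports; a minimal sketch using pycapnp's public API:

capnp.add_import_hook([this_dir])  # re-enable magic imports from this_dir
import addressbook_capnp           # resolving addressbook.capnp works again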
Code Example #3
File: __init__.py  Project: little-dude/tgen
def make_schemas_module():
    """
    small hack to make the schemas available in ``sys.modules``
    """
    import os
    import sys

    capnp.remove_import_hook()
    pkg_path = os.path.dirname(os.path.realpath(__file__))
    schemas_path = os.path.join(pkg_path, 'schemas.capnp')
    sys.modules['schemas'] = capnp.load(
        encoding.ensure_native_str(schemas_path))
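
After make_schemas_module() runs, the loaded schema is importable like an ordinary module; a short sketch, assuming schemas.capnp defines a struct named Packet (hypothetical):

make_schemas_module()
import schemas                      # served from the sys.modules entry above
msg = schemas.Packet.new_message()  # hypothetical struct name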
Code Example #4
File: __init__.py  Project: waynenilsen/capnp-ffi
'''
Cap'n Proto FFI tools for Python

Cap'n Proto is a fast binary format used here to pass messages across the FFI boundary. It makes it easy to declare and modify the complex messages exchanged between function boundaries. Other approaches are possible, but Cap'n Proto is currently the best candidate for supporting Python 2, Python 3, and Rust at the same time.

Cap'n Proto also has the advantage of defining interfaces, which may be useful later; this library does not currently use that feature.

'''

import cffi
import capnp
import functools
from types import FunctionType

capnp.remove_import_hook()

def external(rtype):
    '''
    This decorator declares a function as external (to Python).

    :param rtype: the string name of the return type from the Cap'n Proto schema
    '''
    def _external(func):
        def callme(self, imessage):
            msgbytes = imessage.to_bytes()
            rvalue = getattr(self.lib, func.__name__)(msgbytes, len(msgbytes))
            return getattr(self.MessageType, rtype).from_bytes(b''.join(
                rvalue.values[i] for i in range(rvalue.len)
            ))
        callme.__name__ = func.__name__
        callme.__doc__ = func.__doc__
        return callme
    return _external
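
A hedged usage sketch for the decorator: the class attributes `lib` (a cffi library handle) and `MessageType` (a capnp.load()-ed schema module), as well as the names `RustBridge`, `PingResponse`, and `ping`, are all hypothetical placeholders:

class RustBridge:
    lib = None          # hypothetical: set to a cffi dlopen() handle
    MessageType = None  # hypothetical: set to a capnp.load()-ed module

    @external('PingResponse')   # 'PingResponse' is a hypothetical struct
    def ping(self, imessage):
        '''send a Ping message across the FFI boundary'''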
Code Example #5
File: feature_server.py  Project: amplab/clipper-v0
from pyspark.mllib.classification import LogisticRegressionModel, LogisticRegressionWithSGD
from pyspark.mllib.tree import RandomForestModel

import argparse
import socket
import random
import capnp
import os
from numpy.random import exponential as expn
from sklearn.externals import joblib
# print(joblib.__version__)
import numpy as np
# from collections import OrderedDict
from sklearn import linear_model as lm
import sklearn.svm as svm
capnp.remove_import_hook()
feature_capnp = capnp.load(
    os.path.abspath('../../clipper_server/schema/feature.capnp'))
from sample_feature import TestFeature

EXPN_SCALE_PARAM = 5.0

# import graphlab as gl

# def load_gl_model(local_path):
#     return gl.load_model(local_path)

# import caffe


def load_scikit_model(pickle_path):
    # assumed completion (the excerpt is cut off here): load the pickled
    # scikit-learn model using the joblib import above
    return joblib.load(pickle_path)
Code Example #6
def action():
    import CloudscalerLibcloud
    import os
    import capnp
    from datetime import datetime  # needed for utcnow() below
    # j (the JumpScale global), get_cached_accounts() and the get_*_val()
    # helpers come from the enclosing module

    redises = {}

    now = datetime.utcnow()
    month = now.month
    hour = now.hour
    day = now.day
    year = now.year
    capnp.remove_import_hook()
    schemapath = os.path.join(
        os.path.dirname(CloudscalerLibcloud.__file__),
        "schemas",
        "resourcemonitoring.capnp",
    )
    cloudspace_capnp = capnp.load(schemapath)
    nodecl = j.clients.osis.getCategory(j.core.osis.client, "system", "node")
    imagecl = j.clients.osis.getCategory(j.core.osis.client, "cloudbroker", "image")
    stackcl = j.clients.osis.getCategory(j.core.osis.client, "cloudbroker", "stack")
    vcl = j.clients.osis.getNamespace("vfw")
    virtualfirewalls = {
        vfw["id"]: vfw
        for vfw in vcl.virtualfirewall.search({"gid": j.application.whoAmI.gid})[1:]
    }
    nodes = {
        node["id"]: node
        for node in nodecl.search({"gid": j.application.whoAmI.gid})[1:]
    }
    stacks = {
        stack["id"]: stack
        for stack in stackcl.search({"gid": j.application.whoAmI.gid})[1:]
    }
    images_list = imagecl.search({"$fields": ["id", "name"]})[1:]
    images_dict = {}
    for image in images_list:
        images_dict[image["id"]] = image["name"]

    gid = j.application.whoAmI.gid
    # nid = j.application.whoAmI.nid
    cached_accounts = get_cached_accounts()

    def get_node_redis(node, port=9999):
        redis = redises.get(node["id"])
        if not redis:
            for nicinfo in node["netaddr"]:
                if nicinfo["name"] == "backplane1":
                    ip = nicinfo["ip"][0]
                    break
            else:
                return None
            redis = j.clients.redis.getRedisClient(ip, port)
            redises[node["id"]] = redis
        return redis

    for account_id, cloudspaces_dict in cached_accounts.items():
        folder_name = "/opt/jumpscale7/var/resourcetracking/active/%s/%s/%s/%s/%s" % (
            account_id,
            year,
            month,
            day,
            hour,
        )
        j.do.createDir(folder_name)

        for cloudspace_id, cs in cloudspaces_dict.items():
            vms = cs["vms"]
            cloudspace = cloudspace_capnp.CloudSpace.new_message()
            cloudspace.accountId = account_id
            cloudspace.cloudSpaceId = cloudspace_id
            if cs["status"] == "DEPLOYED" and cs["networkId"] in virtualfirewalls:
                networkId = hex(cs["networkId"])
                net = virtualfirewalls[cs["networkId"]]
                nid = net["nid"]
                node = nodes[nid]
                redis = get_node_redis(node)
                data = dict(gid=gid, nid=nid, id=networkId)
                # NOTE: the four metric names below were mangled to
                # "[email protected]" by the page's e-mail obfuscation; the
                # network.packets.{tx,rx}@{pub,space} forms below are a
                # reconstruction by analogy with the @virt keys further down
                publicTX = get_last_hour_val(
                    redis,
                    "stats:{gid}_{nid}:network.packets.tx@pub.{id}".format(**data),
                )
                publicRX = get_last_hour_val(
                    redis,
                    "stats:{gid}_{nid}:network.packets.rx@pub.{id}".format(**data),
                )
                spaceRX = get_last_hour_val(
                    redis,
                    "stats:{gid}_{nid}:network.packets.rx@space.{id}".format(**data),
                )
                spaceTX = get_last_hour_val(
                    redis,
                    "stats:{gid}_{nid}:network.packets.tx@space.{id}".format(**data),
                )
            else:
                publicTX = publicRX = spaceRX = spaceTX = 0

            machines = cloudspace.init("machines", len(vms) + 1)
            m = machines[0]
            m.type = "routeros"
            nics = m.init("networks", 2)
            nic1 = nics[0]
            nic1.tx = publicTX
            nic1.rx = publicRX
            nic1.type = "external"
            nic2 = nics[1]
            nic2.tx = spaceTX
            nic2.rx = spaceRX
            nic2.type = "space"

            for idx, machine_dict in enumerate(vms):
                vm_id = machine_dict["id"]
                m = machines[idx + 1]
                m.type = "vm"
                m.id = vm_id
                stack_id = machine_dict.get("stackId", None)
                # get Image name
                image_name = images_dict.get(machine_dict["imageId"], "")
                has_stack = machine_dict["status"] != "HALTED" and stack_id
                if has_stack:
                    # get redis for this stack
                    stack = stacks[stack_id]
                    nid = int(stack["referenceId"])
                    redis = get_node_redis(nodes[nid])
                    # get CPU
                    cpu_key = "stats:{gid}_{nid}:machine.CPU.utilisation@virt.{vm_id}".format(
                        gid=gid, nid=nid, vm_id=vm_id
                    )
                    cpu_seconds = get_last_hour_val(redis, cpu_key)
                    m.cpuMinutes = cpu_seconds / 60
                else:
                    redis = None
                    m.cpuMinutes = 0

                disks_capnp = m.init("disks", len(machine_dict["disks"]))
                # calculate iops
                for index, disk in enumerate(machine_dict["disks"]):
                    disk_id = disk["id"]
                    disk_capnp = disks_capnp[index]
                    disk_capnp.id = disk_id
                    disk_capnp.size = disk["sizeMax"]
                    disk_iops_read_key = "stats:{gid}_{nid}:disk.iops.read@virt.{disk_id}".format(
                        gid=gid, nid=nid, disk_id=disk_id
                    )
                    val = get_val(redis, disk_iops_read_key)
                    disk_capnp.iopsRead = val.get("h_last", 0)
                    disk_capnp.iopsReadMax = val.get("h_last_max", 0)
                    disk_iops_write_key = "stats:{gid}_{nid}:disk.iops.write@virt.{disk_id}".format(
                        gid=gid, nid=nid, disk_id=disk_id
                    )
                    val = get_val(redis, disk_iops_write_key)
                    disk_capnp.iopsWrite = val.get("h_last", 0)
                    disk_capnp.iopsWriteMax = val.get("h_last_max", 0)

                # Calculate Network tx and rx
                nics = m.init("networks", len(machine_dict["nics"]))
                for index, nic in enumerate(machine_dict["nics"]):
                    mac = nic["macAddress"]
                    nic_capnp = nics[index]
                    nic_capnp.type = "external" if nic["type"] == "PUBLIC" else "space"
                    tx_key = "stats:{gid}_{nid}:network.packets.tx@virt.{mac}".format(
                        gid=gid, nid=nid, mac=mac
                    )
                    rx_key = "stats:{gid}_{nid}:network.packets.rx@virt.{mac}".format(
                        gid=gid, nid=nid, mac=mac
                    )
                    nic_capnp.tx = get_last_hour_val(redis, tx_key)
                    nic_capnp.rx = get_last_hour_val(redis, rx_key)

                m.imageName = image_name
                m.mem = machine_dict["memory"]
                m.vcpus = machine_dict["vcpus"]
                m.status = machine_dict["status"]
            # write the cloudspace message to disk
            with open("%s/%s.bin" % (folder_name, cloudspace_id), "w+b") as f:
                cloudspace.write(f)
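
A hedged read-back sketch for the files written above (same schema and naming as in the example):

with open("%s/%s.bin" % (folder_name, cloudspace_id), "rb") as f:
    cs = cloudspace_capnp.CloudSpace.read(f)
    print(cs.cloudSpaceId, len(cs.machines))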
Code Example #7
def session_capnp():
    """Load out capnproto schema
    """
    import capnp
    capnp.remove_import_hook()
    return capnp.load(os.path.join(capture.__path__[0], 'session.capnp'))
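
A usage sketch, assuming session.capnp defines a struct named Session (hypothetical):

session_mod = session_capnp()
msg = session_mod.Session.new_message()  # hypothetical struct name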
Code Example #8
def load_capnp(filename):
    src_dir = os.path.dirname(__file__)
    capnp.remove_import_hook()
    return capnp.load(os.path.join(src_dir, "../capnp", filename))
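
Usage sketch (both the schema file name and the struct name are hypothetical):

message_capnp = load_capnp("message.capnp")  # hypothetical file under ../capnp
msg = message_capnp.Message.new_message()    # hypothetical struct name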
Code Example #9
def __init__(self, schema):
    capnp.remove_import_hook()
    self.schema = capnp.load(schema)
Code Example #10
def main(options):
    # assumes module-level imports: os, pprint, xlwt, CloudscalerLibcloud,
    # capnp, `from datetime import datetime` and `from os import listdir`
    now = datetime.utcnow()
    hour = now.hour
    day = now.day
    month = now.month
    year = now.year

    capnp.remove_import_hook()
    schemapath = os.path.join(os.path.dirname(CloudscalerLibcloud.__file__),
                              'schemas', 'resourcemonitoring.capnp')
    resources_capnp = capnp.load(schemapath)
    root_path = "/opt/jumpscale7/var/resourcetracking"
    accounts = listdir(root_path)

    book = xlwt.Workbook(encoding='utf-8')
    nosheets = True
    for account in accounts:
        hour = now.hour
        file_path = os.path.join(root_path,
                                 str(account), str(year), str(month), str(day),
                                 str(hour), 'account_capnp.bin')
        while not os.path.exists(file_path) and hour != 0:
            hour -= 1
            file_path = os.path.join(root_path, str(account), str(year),
                                     str(month), str(day), str(hour),
                                     'account_capnp.bin')
        if not os.path.exists(file_path):
            print('Skipping %s' % file_path)
            continue
        nosheets = False
        sheet = book.add_sheet("account %s" % account)
        sheet.write(0, 0, 'Cloud Space ID')
        sheet.write(0, 1, 'Machine Count')
        sheet.write(0, 2, 'Total Memory')
        sheet.write(0, 3, 'Total VCPUs')
        sheet.write(0, 4, 'Disk Size')
        sheet.write(0, 5, 'Disk IOPS Read')
        sheet.write(0, 6, 'Disk IOPS Write')
        sheet.write(0, 7, 'NICs TX')
        sheet.write(0, 8, 'NICs RX')
        try:
            with open(file_path, 'rb') as f:
                account_obj = resources_capnp.Account.read(f)
                if options.debug:
                    pprint.pprint(account_obj.to_dict())
                for idx, cs in enumerate(account_obj.cloudspaces):
                    cs_id = cs.cloudSpaceId
                    machines = len(cs.machines)
                    vcpus = 0
                    mem = 0
                    disksize = 0
                    disk_iops_read = 0
                    disk_iops_write = 0
                    nics_tx = 0
                    nics_rx = 0
                    for machine in cs.machines:
                        vcpus += machine.vcpus
                        mem += machine.mem
                        for disk in machine.disks:
                            disk_iops_read += disk.iopsRead
                            disk_iops_write += disk.iopsWrite
                            disksize += disk.size
                        for nic in machine.networks:
                            nics_tx += nic.tx
                            nics_rx += nic.rx
                    sheet.write(idx + 1, 0, cs_id)
                    sheet.write(idx + 1, 1, machines)
                    sheet.write(idx + 1, 2, mem)
                    sheet.write(idx + 1, 3, vcpus)
                    sheet.write(idx + 1, 4, disksize)
                    sheet.write(idx + 1, 5, disk_iops_read)
                    sheet.write(idx + 1, 6, disk_iops_write)
                    sheet.write(idx + 1, 7, nics_tx)
                    sheet.write(idx + 1, 8, nics_rx)
        except Exception as e:
            print(e)

    if nosheets is False:
        book.save('example.xls')
    else:
        print('No data found')
Code Example #11
def init(self, socket):
    capnp.remove_import_hook()
    self.service = capnp.load('FeedbackCamera.capnp')
    self.client = capnp.TwoPartyClient(socket)
    self.camera = self.client.bootstrap()
    self.camera = self.camera.cast_as(self.service.FeedbackCamera)
Code Example #12
def init(self, socket):
    capnp.remove_import_hook()
    self.service = capnp.load('RobotService.capnp')
    self.client = capnp.TwoPartyClient(socket)
    self.robot = self.client.bootstrap()
    self.robot = self.robot.cast_as(self.service.RobotService)
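
Once the capability has been cast (here and in Code Example #11), schema-defined RPC methods can be invoked through pycapnp's promise API; a sketch given `robot` from init() above, where the method name moveTo is hypothetical:

promise = robot.moveTo(x=1.0, y=2.0)  # hypothetical schema method
result = promise.wait()               # block until the server responds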
Code Example #13
def main(options):
    # assumes module-level imports: os, pprint, xlwt, capnp,
    # `from datetime import datetime` and `from os import environ, listdir`
    now = datetime.utcnow()
    hour = now.hour
    day = now.day
    month = now.month
    year = now.year

    capnp.remove_import_hook()
    resources_capnp = capnp.load("resourcemonitoring.capnp")
    root_path = environ["DESTINATION_FOLDER"]
    accounts = listdir(root_path)

    book = xlwt.Workbook(encoding="utf-8")
    nosheets = True
    for account in accounts:
        hour = now.hour
        file_path = os.path.join(
            root_path,
            str(account),
            str(year),
            str(month),
            str(day),
            str(hour),
            "account_capnp.bin",
        )
        while not os.path.exists(file_path) and hour != 0:
            hour -= 1
            file_path = os.path.join(
                root_path,
                str(account),
                str(year),
                str(month),
                str(day),
                str(hour),
                "account_capnp.bin",
            )
        if not os.path.exists(file_path):
            print("Skipping %s" % file_path)
            continue
        nosheets = False
        sheet = book.add_sheet("account %s" % account)
        sheet.write(0, 0, "Cloud Space ID")
        sheet.write(0, 1, "Machine Count")
        sheet.write(0, 2, "Total Memory")
        sheet.write(0, 3, "Total VCPUs")
        sheet.write(0, 4, "Disk Size")
        sheet.write(0, 5, "Disk IOPS Read")
        sheet.write(0, 6, "Disk IOPS Write")
        sheet.write(0, 7, "NICs TX")
        sheet.write(0, 8, "NICs RX")
        try:
            with open(file_path, "rb") as f:
                account_obj = resources_capnp.Account.read(f)
                if options.debug:
                    pprint.pprint(account_obj.to_dict())
                for idx, cs in enumerate(account_obj.cloudspaces):
                    cs_id = cs.cloudSpaceId
                    machines = len(cs.machines)
                    vcpus = 0
                    mem = 0
                    disksize = 0
                    disk_iops_read = 0
                    disk_iops_write = 0
                    nics_tx = 0
                    nics_rx = 0
                    for machine in cs.machines:
                        vcpus += machine.vcpus
                        mem += machine.mem
                        for disk in machine.disks:
                            disk_iops_read += disk.iopsRead
                            disk_iops_write += disk.iopsWrite
                            disksize += disk.size
                        for nic in machine.networks:
                            nics_tx += nic.tx
                            nics_rx += nic.rx
                    sheet.write(idx + 1, 0, cs_id)
                    sheet.write(idx + 1, 1, machines)
                    sheet.write(idx + 1, 2, mem)
                    sheet.write(idx + 1, 3, vcpus)
                    sheet.write(idx + 1, 4, disksize)
                    sheet.write(idx + 1, 5, disk_iops_read)
                    sheet.write(idx + 1, 6, disk_iops_write)
                    sheet.write(idx + 1, 7, nics_tx)
                    sheet.write(idx + 1, 8, nics_rx)
        except Exception as e:
            print(e)

    if nosheets is False:
        book.save("example.xls")
    else:
        print("No data found")
Code Example #14
def action(gid=None):
    """
    Send tar of account data on  each enviroment
    """
    import CloudscalerLibcloud
    import capnp
    # assumes module-level imports: os, io, re, base64, tarfile;
    # j is the JumpScale global
    agentcontroller = j.clients.agentcontroller.get()
    cbcl = j.clients.osis.getNamespace("cloudbroker")
    jobs = list()

    capnp.remove_import_hook()
    schemapath = os.path.join(os.path.dirname(CloudscalerLibcloud.__file__),
                              'schemas')
    resources_capnp = capnp.load(
        os.path.join(schemapath, 'resourcemonitoring.capnp'))

    # schedule command
    for location in cbcl.location.search({})[1:]:
        jobs.append(
            agentcontroller.scheduleCmd(cmdcategory="greenitglobe",
                                        cmdname="collect_account_data",
                                        nid=None,
                                        timeout=60,
                                        roles=['controller'],
                                        gid=location["gid"],
                                        wait=True))

    # get return from each job.
    accounts = dict()
    for job in jobs:
        result = agentcontroller.waitJumpscript(job=job)

        # read the tar.
        c = io.BytesIO()
        if result['state'] != 'OK':
            raise RuntimeError("Failed to collect account data from grid %s" %
                               (job['gid']))
        result_decoded = base64.decodestring(result['result'])
        c.write(result_decoded)
        c.seek(0)
        tar = tarfile.open(mode="r", fileobj=c)
        members = tar.getmembers()
        for member in members:
            if member.name.endswith(".bin"):
                accountid, year, month, day, hour = re.findall(
                    r"opt/jumpscale7/var/resourcetracking/active/(\d+)/(\d+)/(\d+)/(\d+)/(\d+)/",
                    member.name)[0]

                datekey = (year, month, day, hour)
                accounts.setdefault(accountid, {
                    datekey: []
                }).setdefault(datekey, []).append(member)

    for account_id, dates in accounts.iteritems():
        for date, members in dates.iteritems():
            account = resources_capnp.Account.new_message()
            year, month, day, hour = date
            account.accountId = int(account_id)
            cloudspaces = account.init("cloudspaces", len(members))

            for i, member in enumerate(members):
                # read the capnp file obj.
                binary_content = tar.extractfile(member).read()
                cloudspace_obj = resources_capnp.CloudSpace.from_bytes(
                    binary_content)
                cloudspaces[i] = cloudspace_obj
                filepath = '/opt/jumpscale7/var/resourcetracking/%s/' % os.path.join(
                    account_id, year, month, day, hour)
                try:
                    os.makedirs(filepath)
                except OSError as err:
                    if err.errno != 17:
                        raise

            with open(os.path.join(filepath, "account_capnp.bin"), 'w+b') as f:
                account.write(f)