Example #1
    def scrape(self,url,parent):
        Logger.debug('Starting URL scrape for {}'.format(url))
        config.last_url_and_parent = url + ', {}'.format('' if parent is None else parent)

        new_url=base_util.unreplace_dot_url(url)

        response=self.http.get(new_url)
        Logger.debug('Got URL')
        if not hasattr(response,'data') and new_url.startswith('www.'):
            new_url=new_url.replace('www.','http://')

            response=self.http.get(new_url)

            if not hasattr(response,'data'):
                new_url=new_url.replace('http://','http://www.')
                response=self.http.get(new_url)


        if hasattr(response,'data'):
            body=base_util.utf_8_safe_decode(response.data)

        else:
            Logger.error('No data associated with '+new_url)
            raise AttributeError(new_url+':::No data')

        return body,new_url
Example #2
def apiMoveUserVision(userId):
    if request.method == 'POST':
        if SessionManager.userLoggedIn():

            userInfo = SessionManager.getUser()
            if userInfo['id'] != userId:
                abort(406)

            parameters = request.json
            if 'visionId' not in parameters or \
               'srcIndex' not in parameters or \
               'destIndex' not in parameters:
                abort(406)
            visionId = parameters['visionId']
            srcIndex = parameters['srcIndex']
            destIndex = parameters['destIndex']

            Logger.debug("V:%s src: %s dest: %s" % (visionId, srcIndex, destIndex))

            user = User.getById(userInfo['id'])
            result = user.moveVision(visionId, srcIndex, destIndex)

            if result:
                data = { 'result' : "success" }
            else:
                data = { 'result' : "error" }
            return jsonify(data)
        abort(403)
    abort(405)
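For reference, a minimal client-side sketch of the JSON payload this handler expects. The route path, host, and port are assumptions (the Flask route itself is not shown here); the three keys come straight from the checks above.

import requests  # any HTTP client that can POST JSON would do

payload = {"visionId": 42, "srcIndex": 0, "destIndex": 3}
# Hypothetical URL; substitute the real route registered for apiMoveUserVision.
response = requests.post("http://localhost:5000/api/user/1/move_vision", json=payload)
print(response.json())  # {"result": "success"} or {"result": "error"}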
Example #3
def selectNodes(serviceInstance, serviceCapabilityManager):
    logger = Logger()
    print("SelectNodes start!")
    requirements = serviceInstance.getInterpretedRequirement()

    nodes = serviceCapabilityManager.availableNodes()

    result = {}

    for node, cap in nodes.items():
        result[node] = 0
        for attr, req in requirements.items():
            resource = cap[attr]
            if (resource < req["required"]):
                logger.debug("Lack of resource [" + attr + "], required: [" +
                             str(req["required"]) + "], available: [" +
                             str(resource) + "]")
                result.pop(node)
                break
            else:
                result[node] += resource * req['weight']

    logger.debug(result)

    # return the qualifying nodes ordered by utility score, highest first
    return sorted(result, key=result.get, reverse=True)
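A worked illustration of the scoring loop above, using hypothetical capability and requirement dictionaries shaped the way selectNodes reads them (cap[attr], req["required"], req["weight"]):

# Hypothetical data; the attribute names are only examples.
nodes = {"node01": {"ProcessingTime": 80, "FPS": 60},
         "node02": {"ProcessingTime": 40, "FPS": 90}}
requirements = {"ProcessingTime": {"required": 50, "weight": 10},
                "FPS": {"required": 50, "weight": 6}}
# node01 passes both checks and scores 80*10 + 60*6 = 1160;
# node02 is dropped because its ProcessingTime (40) is below the required 50.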
Example #4
def create():

    '''
    Debugging Tip:
    if you see: 

        Bad Request
        The browser (or proxy) sent a request that this server could not understand.

    (a 400 error)

    Make sure all of the form fields are given correctly

    http://stackoverflow.com/questions/8552675/form-sending-error-flask
    '''

    mediaUrl = request.form[Constant.BOOKMARKLET_POST_MEDIA_URL]
    text = request.form[Constant.BOOKMARKLET_POST_TEXT]
    pageUrl = request.form[Constant.BOOKMARKLET_POST_PAGE_URL]
    pageTitle = request.form[Constant.BOOKMARKLET_POST_PAGE_TITLE]

    #Vision Privacy
    private = False
    if Constant.BOOKMARKLET_POST_IS_PRIVATE in request.form:
        private = True

    #Format for saving
    visionIsPublic = not private

    #Validate Parameters
    if mediaUrl is None \
        or text is None \
        or pageUrl is None \
        or pageTitle is None:
        return "Invalid Vision Parameters"

    Logger.debug("URL: " + mediaUrl)

    #Question: Do we really need to check the login again here?
    #Check Login
    if not SessionManager.userLoggedIn():
        return redirect(url_for('login'))

    #Get the user id
    userId = SessionManager.getUser()['id']

    #Add
    message = "Could not create vision"  # default so the error template below always has a message
    user = User.getById(userId)
    if user:
        # TODO: should we save pageUrl and pageTitle also?
        vision, message = user.addVision(mediaUrl, text, False, visionIsPublic)

        if vision:
            #Successful Create!
            return render_template('successCreatingVision.html',
                                   visionId=vision.id(), userId=userId)
    #Error
    return render_template('errorCreatingVision.html', message=message)
Example #5
def apiAddUserVision(userId):
    if request.method == 'POST':
        if SessionManager.userLoggedIn():
            userInfo = SessionManager.getUser()
            if userInfo['id'] != userId:
                abort(406)

            parameters = request.json
            if 'useImage' not in parameters or \
               'text'     not in parameters or \
               'privacy'  not in parameters:
                abort(406)
            useImage = parameters['useImage']
            text = parameters['text'].strip()
            isPublic = parameters['privacy']

            Logger.debug("IsPublic: " + str(isPublic))

            # Make sure input OK to create a new vision
            if not useImage:
            # TODO: should we allow text w/o image?
            #if useImage == False and len(text) == 0:
                abort(406)

            # Make sure image link OK
            url = ""
            if useImage:
                url = SessionManager.getPreviewUrl()

            # Create a new vision with the photo
            user = User.getById(userId)
            
            # Make sure we have a valid user
            if not user:
                data = {'result' : "error"}

            else:
                vision, errorMsg = user.addVision(url, text, True, isPublic)

                if vision:
                    objList = VisionList.createFromVision(vision)
                    if len(objList.visions()) == 1:
                        data = { 'result'    : "success",
                                 'newVision' : objList.toDictionary(
                                        options=[Vision.Options.PICTURE,
                                                 Vision.Options.USER,
                                                 Vision.Options.PARENT_USER,
                                                 Vision.Options.COMMENT_PICTURES,
                                                 Vision.Options.COMMENTS])[0] }
                    else:
                        data = { 'result' : "error" }
                else:
                    data = { 'result' : "error" }

            return jsonify(data)
        abort(403)
    abort(405)
Example #6
class DBManager(object):
    def __init__(self, mongo, collection):
        self.logger = Logger()
        self.logger.debug("INTO DBManager!")
        client = MongoClient(mongo["ip"],
                             username=mongo["username"],
                             password=mongo["password"],
                             authSource=mongo["database"],
                             authMechanism='SCRAM-SHA-1')
        database = client.get_database(mongo["database"])
        self.collection = database.get_collection(collection)

    def getCollection(self):
        return self.collection
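A minimal usage sketch, assuming a MongoDB instance reachable with the settings below; the dictionary keys mirror the ones __init__ reads, and insert_one is a standard pymongo collection method.

# Hypothetical connection settings.
mongo_config = {
    "ip": "192.168.0.10",
    "username": "admin",
    "password": "secret",
    "database": "capability",
}

manager = DBManager(mongo_config, "profiles")
collection = manager.getCollection()
collection.insert_one({"node": "node01", "cpu": 0.42})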
Example #7
    def scrape_links_from_position(self,pos):
        MongoDB.connect(settings.HOST_NAME,settings.PORT)
        links=self.__get_next_urls(pos)


        Logger.info(links)
        for link in links:
            self.scrape_link_and_child(link)

        Logger.debug('Process job completed')
        return 0
Example #8
def startService(name):
    logger = Logger()
    logger.debug("Start Service!")
    client = docker.from_env()
    service = client.services.create(
        "face_detection",
        name=name,
        networks=["swarm_net"],
        mounts=["/home/pi/video/face_detection/container:/data:rw"],
        mode="replicated",
        constraints=["node.labels.name==node03"])
    #container = client.containers.run("face_detection:latest", detach=True)
    return service
Example #9
class ServiceManager(object):
    def __init__(self, config):
        threading.Thread.__init__(self)
        self.logger = Logger()
        self.serviceList = []
        self.config = config

    def receiveService(self, serviceInstance):
        t = threading.Thread(target=self.publishService, args=(serviceInstance,))
        self.serviceList.append([t, serviceInstance])
        t.start()

    def stopService(self, serviceInstance):
        ClusterManager.stopService(serviceInstance)

    #
    # Service Management
    #
    '''
    Name: publishService
    parameter: ServiceInstance
    action: 
            let requirementInterpreter interpret service's requirements in terms of device's capabilities 
            -->
            let resourceSelector select suitable nodes which satisfy service's requirements
            -->
            let clusterManager make selected nodes start service
    '''

    def publishService(self, serviceInstance):
        self.logger.debug("PublishService starts!")

        # INTERPRET
        interpretedRequirement = RequirementInterpreter.interpret(
            serviceInstance)

        # SELECT
        serviceInstance.setInterpretedRequirement(interpretedRequirement)
        serviceCapabilityManager = ServiceCapabilityManager(
            self.config, serviceInstance)
        serviceCapabilityManager.start()
        selectedNodes = ResourceSelector.selectNodes(serviceInstance,
                                                     serviceCapabilityManager)

        print("selected nodes: " + ", ".join(selectedNodes))
        self.logger.debug("selected nodes: " + ", ".join(selectedNodes))

        # START
        serviceInstance.setSeledtedNodes(selectedNodes)
        ClusterManager.startService(serviceInstance)
Example #10
class DeviceAbstractor(object):
    def __init__(self, ip, port, duration, name):
        self.logger = Logger()
        self.logger.debug("INTO DeviceAbstractor!")
        self.capabilityList = []

        self.ip = ip
        self.port = port
        self.duration = duration
        self.name = name

        self.profiler = Profiler(self.duration)

        self.doProfiling()

    def doProfiling(self):
        self.profiler.monitor_frame()
Example #11
    def __init__(self, configFile):
        logger = Logger()
        self.config = configparser.ConfigParser()
        self.config.read(configFile)

        self.name = self.config['Information']['name']

        print(self.name)

        ip = self.config['MQTT']['ip']
        port = int(self.config['MQTT']['port'])
        duration = float(self.config['Profile']['duration'])

        self.location = float(self.config['Parameter']['location'])
        self.resolution = float(self.config['Parameter']['resolution'])
        logger.debug("Start Device!")

        deviceAbstractor = DeviceAbstractor(ip, port, duration, self.name)
Example #12
class DeviceManager(object):
    # The callback for when the client receives a CONNACK response from the server.
    def on_connect(self, client, mosq, userdata, rc):
        print("Connected with result code " + str(rc))
        # Subscribing in on_connect() means that if we lose the connection and
        # reconnect then subscriptions will be renewed.
        client.subscribe("device/#")

    # The callback for when a PUBLISH message is received from the server.
    def on_message(self, client, userdata, msg):
        if( 'start' in msg.topic ):
            print("Service is allocated to device "+self.dev)
            serviceInstance = ServiceInstance(str(msg.payload.decode('utf-8')))
            t = threading.Thread(target=self.startService, args=(serviceInstance,))
            self.serviceList.append([t, serviceInstance])
            t.start()
        elif( 'stop' in msg.topic):
            pass

    def __init__(self, ip, port, dev):
        self.containers = []
        self.logger = Logger()
        self.serviceList = []

        self.ip = ip
        self.port = port
        self.dev = dev

        self.logger.debug("Device ["+self.dev+"] is connected!")

        client = mqtt.Client()
        client.on_connect = self.on_connect
        client.on_message = self.on_message

        client.connect(ip, port, 60)
        client.loop_forever()

Example #13
class ThreadChecker(threading.Thread):
    """
    Thread implementation is completely independent on what
    actions may be run by it ...
    
    """
    def __init__(self, logLevel, action):
        self.actionToRun = action
        self.logger = Logger(name="a2d2 thread",
                             logFile=conf.APPLICATION_LOG_FILE,
                             level=logLevel)
        threading.Thread.__init__(self)
        self.__stopFlag = False
        self.__bypass = False  # if True, actions are skipped in periodic check
        self.logger.info("Initialised.")

    def run(self):
        while True:
            if self.__stopFlag:
                break
            time.sleep(3)
            if self.__stopFlag:
                break
            self.logger.debug("Running ...")

            if self.__bypass:
                self.logger.debug("Dummy loop, actual thread action bypassed.")
            else:
                self.actionToRun(self.logger)

            self.logger.info("Loop finished, sleeping ...")

        self.logger.info("Completely finished.")

    def setStop(self):
        self.logger.debug("Setting stop flag to True ...")
        self.__stopFlag = True
        self.logger.debug("Stop flag set to True.")

    def bypassExecution(self):
        self.__bypass = True

    def resumeExecution(self):
        self.__bypass = False
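A brief usage sketch, assuming a log level value accepted by the project's Logger and an action callable that takes the thread's logger, which is how run() invokes it:

# Hypothetical action; the real actions are defined elsewhere in the project.
def heartbeat(logger):
    logger.info("heartbeat action executed")

checker = ThreadChecker(logLevel="DEBUG", action=heartbeat)  # "DEBUG" is an assumed level value
checker.start()            # run() loops roughly every 3 seconds until the stop flag is set
checker.bypassExecution()  # keep looping but skip the action
checker.resumeExecution()
checker.setStop()          # ask the loop to exit
checker.join()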
Example #14
    def scrape_link_and_child(self,parent_url):
        parent_url=base_util.replace_dot_url(parent_url)
        webpage_body,parent_url=self.scrape(base_util.unreplace_dot_url(parent_url),None)

        #exit if we failed to scrape the website
        if webpage_body is None:
            return

        Logger.debug('Saving Parent')
        MongoDB.save_page(url=parent_url,page=webpage_body)
        Logger.info('Completed page: '+parent_url)

        #Now, grab the children of this webpage
        all_ahref=[base_util.combine_parent_rel_link(parent_url,a.attrs['href']) for a in BeautifulSoup(webpage_body,'html.parser', from_encoding="utf-8").find_all('a') if 'href' in a.attrs]

        child_urls=random.sample(all_ahref,settings.GET_X_CHILD) if len(all_ahref)>=settings.GET_X_CHILD else all_ahref

        #get rid of bad normalization
        if not re.match('^www[.].*$',parent_url):
            Logger.info('Updating bad url for {}'.format(parent_url))
            MongoDB.update_url(base_util.normalize_url(parent_url),parent_url)

        if len(child_urls) > 0:

            #get the children; child_urls is a subset of all_ahref
            for child_url in child_urls:
                Logger.debug('Get Child {}'.format(child_url))
                child_page=self.scrape(child_url,parent_url)

                if child_page is None:
                    exploredset=set()
                    tries=0
                    for url in set(all_ahref)^(exploredset):
                        if tries==settings.MAX_RETRIES:
                            Logger.info('Max retry number exceeded')
                            break

                        Logger.info("trying new url: "+url)

                        child_page=self.scrape(url,parent_url)

                        if child_page is not None:
                            break
                        exploredset.add(url)

                        tries+=1

                if child_page is not None:
                    Logger.debug('Saving Child {}'.format(child_url))
                    MongoDB.save_modify_url(url=base_util.replace_dot_url(child_url),parent=[MongoDB.get_url_object(parent_url)],genre=[],page=child_page)
                    Logger.info('Completed page: '+child_url)
Example #15
class BaseModel:
    """Abstract class of model for tensorflow graph"""
    AUTHOR = 'demetoir'

    def __str__(self):
        return "%s_%s" % (self.AUTHOR, self.__class__.__name__)

    def __init__(self,
                 input_shapes=None,
                 params=None,
                 logger_path=None,
                 root_path=ROOT_PATH):
        """create instance of AbstractModel

        :type logger_path: str
        :param logger_path: path for log file
        if logger_path is None, log ony stdout
        """
        self.root_path = root_path

        if logger_path is None:
            self.log = Logger(self.__class__.__name__, LOG_PATH)
        else:
            self.log = Logger(self.__class__.__name__, logger_path)

        self.sess = None
        self.saver = None
        self.summary_writer = None
        self.is_built = False

        # gen instance id
        self.input_shapes = input_shapes
        self.params = params

        self.id = "_".join([self.__str__(), time_stamp()])
        self.instance_path = os.path.join(INSTANCE_PATH, self.id)
        self.instance_visual_result_folder_path = os.path.join(
            self.instance_path, VISUAL_RESULT_FOLDER)
        self.instance_source_folder_path = os.path.join(
            self.instance_path, 'src_code')
        self.instance_summary_folder_path = os.path.join(
            self.instance_path, 'summary')
        self.instance_class_name = self.__class__.__name__
        self.instance_source_path = os.path.join(
            self.instance_source_folder_path, self.id + '.py')
        self.metadata_path = os.path.join(self.instance_path, 'instance.meta')
        self.save_folder_path = os.path.join(self.instance_path, 'check_point')
        self.check_point_path = os.path.join(self.save_folder_path,
                                             'instance.ckpt')

        self.metadata = {
            MODEL_METADATA_KEY_INSTANCE_ID: self.id,
            MODEL_METADATA_KEY_INSTANCE_PATH: self.instance_path,
            MODEL_METADATA_KEY_INSTANCE_VISUAL_RESULT_FOLDER_PATH:
            self.instance_visual_result_folder_path,
            MODEL_METADATA_KEY_INSTANCE_SOURCE_FOLDER_PATH:
            self.instance_source_folder_path,
            MODEL_METADATA_KEY_INSTANCE_SOURCE_PATH: self.instance_source_path,
            MODEL_METADATA_KEY_INSTANCE_SUMMARY_FOLDER_PATH:
            self.instance_summary_folder_path,
            MODEL_METADATA_KEY_INSTANCE_CLASS_NAME: self.instance_class_name,
            MODEL_METADATA_KEY_METADATA_PATH: self.metadata_path,
            MODEL_METADATA_KEY_CHECK_POINT_PATH: self.check_point_path,
            MODEL_METADATA_KEY_SAVE_FOLDER_PATH: self.save_folder_path,
            MODEL_METADATA_KEY_PARAMS: self.params,
            MODEL_METADATA_KEY_INPUT_SHAPES: self.input_shapes,
        }

    def __del__(self):
        # TODO this del need hack
        try:
            self.close_session()
            # reset tensorflow graph
            tf.reset_default_graph()

            del self.sess
            del self.root_path
            del self.log
        except BaseException as e:
            pass

    @property
    def hyper_param_key(self):
        return []

    def setup_model(self):
        self.log.debug('init directory')
        setup_directory(self.instance_path)
        setup_directory(self.instance_visual_result_folder_path)
        setup_directory(self.instance_source_folder_path)
        setup_directory(self.instance_summary_folder_path)
        setup_directory(self.save_folder_path)

    def load_metadata(self, path):
        self.metadata = load_json(path)

        self.id = self.metadata[MODEL_METADATA_KEY_INSTANCE_ID]
        self.instance_path = self.metadata[MODEL_METADATA_KEY_INSTANCE_PATH]
        self.instance_visual_result_folder_path = self.metadata[
            MODEL_METADATA_KEY_INSTANCE_VISUAL_RESULT_FOLDER_PATH]
        self.instance_source_path = self.metadata[
            MODEL_METADATA_KEY_INSTANCE_SOURCE_PATH]
        self.instance_class_name = self.metadata[
            MODEL_METADATA_KEY_INSTANCE_CLASS_NAME]
        self.instance_summary_folder_path = self.metadata[
            MODEL_METADATA_KEY_INSTANCE_SUMMARY_FOLDER_PATH]
        self.save_folder_path = self.metadata[
            MODEL_METADATA_KEY_SAVE_FOLDER_PATH]
        self.check_point_path = self.metadata[
            MODEL_METADATA_KEY_CHECK_POINT_PATH]
        self.params = self.metadata[MODEL_METADATA_KEY_PARAMS]
        self.input_shapes = self.metadata[MODEL_METADATA_KEY_INPUT_SHAPES]

    def save_metadata(self, path):
        self.log.debug('dump metadata')
        dump_json(self.metadata, path)

    def open_session(self):
        if self.sess is None:
            self.sess = tf.Session()
            self.saver = tf.train.Saver()
            self.sess.run(tf.global_variables_initializer())
            # self.summary_writer = tf.summary.FileWriter(self.instance_summary_folder_path, self.sess.graph)
        else:
            raise Exception("fail to open tf session")

    def close_session(self):
        if self.sess is not None:
            self.sess.close()

        if self.saver is not None:
            self.saver = None

        if self.summary_writer is not None:
            pass
            # self.summary_writer.close()

    def build(self):
        try:
            with tf.variable_scope(str(self.id)):
                with tf.variable_scope("misc_ops"):
                    self.log.debug("build_misc_ops")
                    self.build_misc_ops()

                with tf.variable_scope("hyper_parameter"):
                    self.log.debug('build_hyper_parameter')
                    self.hyper_parameter()
                    self.build_hyper_parameter(self.params)

                self.log.debug('build_input_shapes')

                if self.input_shapes is None:
                    raise AttributeError("input_shapes not feed")
                self.build_input_shapes(self.input_shapes)

                self.log.debug('build_main_graph')
                self.build_main_graph()

                with tf.variable_scope('loss_function'):
                    self.log.debug('build_loss_function')
                    self.build_loss_function()

                with tf.variable_scope('train_ops'):
                    self.log.debug('build_train_ops')
                    self.build_train_ops()

                with tf.variable_scope('summary_ops'):
                    self.log.debug('build_summary_ops')
                    self.build_summary_ops()

        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.log.error("\n", "".join(traceback.format_tb(exc_traceback)))
            raise ModelBuildFailError("ModelBuildFailError")
        else:
            self.is_built = True
            self.log.info("build success")

    def build_input_shapes(self, input_shapes):
        """load input shapes for tensor placeholder

        :type input_shapes: dict
        :param input_shapes: input shapes for tensor placeholder

        :raise NotImplementedError
        if not implemented
        """
        raise NotImplementedError

    def build_hyper_parameter(self, params=None):
        """load hyper parameter for model

        :param params:
        :raise NotImplementedError
        if not implemented
        """
        if params is not None:
            for key in self.hyper_param_key:
                self.__dict__[key] = params[key]

    def build_main_graph(self):
        """load main tensor graph

        :raise NotImplementedError
        if not implemented
        """
        raise NotImplementedError

    def build_loss_function(self):
        """load loss function of model

        :raise NotImplementedError
        if not implemented
        """
        raise NotImplementedError

    def build_misc_ops(self):
        """load misc operation of model

        :raise NotImplementedError
        if not implemented
        """
        self.global_step = tf.get_variable("global_step",
                                           shape=1,
                                           initializer=tf.zeros_initializer)
        self.op_inc_global_step = tf.assign(self.global_step,
                                            self.global_step + 1,
                                            name='op_inc_global_step')

        self.global_epoch = tf.get_variable("global_epoch",
                                            shape=1,
                                            initializer=tf.zeros_initializer)
        self.op_inc_global_epoch = tf.assign(self.global_epoch,
                                             self.global_epoch + 1,
                                             name='op_inc_global_epoch')

    def build_train_ops(self):
        """Load train operation of model

        :raise NotImplementedError
        if not implemented
        """
        raise NotImplementedError

    def build_summary_ops(self):
        """load summary operation for tensorboard

        :raise NotImplementedError
        if not implemented
        """
        pass

    def write_summary(self,
                      sess=None,
                      iter_num=None,
                      dataset=None,
                      summary_writer=None):
        """write summary of model for tensorboard

        :type sess: Session object for tensorflow.Session
        :type iter_num: int
        :type dataset: dataset_handler.AbstractDataset
        :type summary_writer: tensorflow.summary.FileWriter

        :param sess: session object for tensorflow
        :param iter_num: current iteration number
        :param dataset: dataset for train model
        :param summary_writer: file writer for tensorboard summary

        :raise NotImplementedError
        if not implemented
        """
        pass

    def hyper_parameter(self):
        self.batch_size = None
        pass

    def save(self):
        self.setup_model()
        self.save_metadata(self.metadata_path)

        if self.sess is None:
            self.open_session()
        self.saver.save(self.sess, self.check_point_path)

        self.log.info("saved at {}".format(self.instance_path))

        return self.instance_path

    def load(self, path):
        path = os.path.join(path, 'instance.meta')
        self.load_metadata(path)

        self.build()
        self.close_session()
        self.open_session()

        self.saver.restore(self.sess, self.check_point_path)

    def get_tf_values(self, fetches, feed_dict):
        return self.sess.run(fetches, feed_dict)

    def if_not_ready_to_train(self):
        if not self.is_built:
            self.build()

        if self.sess is None:
            self.open_session()
Example #16
def Worker_print(string):
  Logger.debug(string)
Example #17
class ServiceManager(threading.Thread):
    # The callback for when the client receives a CONNACK response from the server.
    def on_connect(self, client, mosq, userdata, rc):
        print("Connected with result code " + str(rc))
        # Subscribing in on_connect() means that if we lose the connection and
        # reconnect then subscriptions will be renewed.
        client.subscribe("service/#")

    # The callback for when a PUBLISH message is received from the server.
    def on_message(self, client, userdata, msg):
        print("Service is requested!")
        serviceInstance = ServiceInstance(str(msg.payload.decode('utf-8')))
        t = threading.Thread(target=self.publishService, args=(serviceInstance,))
        self.serviceList.append([t, serviceInstance])
        t.start()

    def __init__(self, config):
        threading.Thread.__init__(self)
        self.logger = Logger()
        self.serviceList = []

        self.config = config

        self.ip = config['MQTT']['ip']
        self.port = int(config['MQTT']['port'])

    def run(self):
        client = mqtt.Client()
        client.on_connect = self.on_connect
        client.on_message = self.on_message

        client.connect(self.ip, self.port, 60)
        client.loop_forever()

    #
    # Service Management
    #
    '''
    Name: publishService
    parameter: ServiceInstance
    action: 
            let requirementInterpreter interpret service's requirements in terms of device's capabilities 
            -->
            let resourceSelector select suitable nodes which satisfy service's requirements
            -->
            let clusterManager make selected nodes start service
    '''

    def publishService(self, serviceInstance):
        self.logger.debug("PublishService starts!")

        # INTERPRET
        interpretedRequirement = RequirementInterpreter.interpret(
            serviceInstance)

        # SELECT
        serviceInstance.setInterpretedRequirement(interpretedRequirement)
        serviceCapabilityManager = ServiceCapabilityManager(
            self.config, serviceInstance)
        serviceCapabilityManager.start()
        selectedNodes = ResourceSelector.selectNodes(serviceInstance,
                                                     serviceCapabilityManager)

        print("selected nodes: " + ", ".join(selectedNodes))
        self.logger.debug("selected nodes: " + ", ".join(selectedNodes))

        # START
        serviceInstance.setSeledtedNodes(selectedNodes)
        ClusterManager.startService(self.config, serviceInstance)
Example #18
class Knowledge(object):
    def __init__(self):
        self.logger = Logger()
        self.service = {
            "ObjectTracking": {
                "Performance": {
                    "DetectionSpeed": "ProcessingTime",
                    "DetectionAccuracy": "DetectionRate"
                },
                "Reliability": {
                    "VideoContinuity": "FPS"
                },
                "Security": {
                    "VideoComposition": "NumberOfComposedVideos"
                }
            },
            "ObjectCounting": {
                "Performance": {
                    "DetectionSpeed": "ProcessingTime",
                    "DetectionAccuracy": "DetectionRate"
                },
                "Reliability": {
                    "VideoContinuity": "FPS"
                },
                "Security": {
                    "VideoComposition": "NumberOfComposedVideos"
                }
            }
        }

        self.logger.debug("Get Service Knowledge")

    def interpret(self, serviceInstance):
        requirement = serviceInstance.getRequirement()
        serviceType = serviceInstance.getType()
        result = {}
        '''
       {
            "DetectionSpeed": {
                "metric": "ProcessingTime",              - for capability calculation
                "weight": 10,    // 0 ~ 10               - for utility function
                required: 70     // 0 ~ 100 (normalized) - for filtering
            }, ..
        }
        '''

        # Map each qualitative level to the weight / required threshold used below.
        levels = {"HIGH": {"weight": 10, "required": 70},
                  "MID":  {"weight": 6,  "required": 50},
                  "LOW":  {"weight": 4,  "required": 30}}

        if serviceType in self.service:
            for aspect in ("Performance", "Reliability", "Security"):
                # Anything other than HIGH or MID falls back to the LOW values.
                level = levels.get(requirement[aspect], levels["LOW"])
                for key, val in self.service[serviceType][aspect].items():
                    result[key] = {"metric": val,
                                   "weight": level["weight"],
                                   "required": level["required"]}

        self.logger.debug("[InterpretedRequirement]")
        self.logger.debug(result)
        return result
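A short sketch of how interpret() maps qualitative requirements onto weighted metrics, using a hypothetical stand-in for ServiceInstance (only getRequirement() and getType() are needed); it assumes the project's Logger used by Knowledge is importable.

class FakeServiceInstance:
    """Hypothetical stand-in exposing only the accessors interpret() uses."""
    def getRequirement(self):
        return {"Performance": "HIGH", "Reliability": "MID", "Security": "LOW"}

    def getType(self):
        return "ObjectTracking"

knowledge = Knowledge()
print(knowledge.interpret(FakeServiceInstance()))
# DetectionSpeed and DetectionAccuracy get weight 10 / required 70,
# VideoContinuity gets 6 / 50, and VideoComposition gets 4 / 30.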
Example #19
    clustering_alg=KMeans
    settings.num_clusters=list({16})
    settings.num_top_words=20 #LDA only
    settings.max_cluster_size=10000 #the cluster will be further broken up if it is greater than this size
    settings.break_up_clusters=True
    settings.spectre_clustering_limit=15000 # if the cluster is less than 15K in size, use spectre clustering instead

    #LOAD DATA
    #generate_random_sample(unpickle_obj(X_pickle_path),unpickle_obj(y_pickle_path),unpickle_obj(ref_index_pickle_path),50000)

    train_set=Training(settings,pickle_dir=PICKLE_DIR)
    train_set.load_training()

    #FEATURE SELECTION
    best_k_attr=10000
    feature_selector=Pipeline([("chi2",SelectKBest(chi2,best_k_attr))])

    clustering_logger.info("Choosing best {} features".format(best_k_attr))

    clustering_logger.debug("Normalizing to LV1")
    #NORMALIZING THE Y
    train_set.y=np.array([[normalize_genre_string(g,1) for g in r] for r in (row for row in train_set.y)])

    clusterer=Clustering()
    clusterer.feature_selection(train_set,feature_selector,fit=True)

    lda_alg=LDA(n_topics=settings.num_clusters[0],n_iter=500, random_state=1)

    lda(lda_alg,train_set,settings.num_top_words)
    #unsupervised(train_set=train_set, settings=settings,clusterer=clusterer, clustering_alg_cls=clustering_alg)