Esempio n. 1
0
 def __init__(
     self,
     num_workers=4,
     start_method="fork",
     worker_class=ProcessQueueExecutor,
 ):
     """Initialize the job queue and spin up its pool of workers.

     :param num_workers: number of workers to spawn
     :param start_method: multiprocessing start method used to build the
                          context handed to the JoinableQueue superclass
     :param worker_class: class instantiated for each worker (process- or
                          thread-based)
     """
     # if not isinstance(worker_class, Worker):
     #     raise Exception
     # Retrieve the spawn context for the joinable queue super class
     ctx = get_context(start_method)
     # Init super class
     super().__init__(ctx=ctx)
     # Number of workers to spawn
     self._num_workers = num_workers
     # JoinableQueue to store completed jobs
     self._completed_jobs = JoinableQueue(ctx=ctx)
     # Worker class, can be either Process or Thread
     self._workerclass = worker_class
     # Results are gathered into this dict by a background daemon thread,
     # which drains the completed-jobs queue for the queue's lifetime.
     self._results = {}
     threading.Thread(
         target=gather_results,
         args=(self._results, self._completed_jobs),
         daemon=True,
     ).start()
     # Spin the workers
     self.start_workers()
Esempio n. 2
0
def UploadSingleArtifact(index_i: int, num_users: int, num_repos: int,
                         benchmark: Benchmark, deploy_tokens: list,
                         return_queue: JoinableQueue) -> None:
    """Upload one artifact to cloudstash, retrying until it succeeds.

    The artifact is assigned to a user/repository pair derived from
    ``index_i`` (round-robin over ``num_users``/``num_repos``); on success
    the benchmark payload is pushed onto ``return_queue``.

    :param index_i: index of this artifact in the overall upload run
    :param num_users: total number of benchmark users
    :param num_repos: total number of repositories
    :param benchmark: benchmark configuration object
    :param deploy_tokens: deploy tokens, one per user
    :param return_queue: queue receiving the payload of the uploaded artifact
    """
    if config.VERBOSE:
        log(f"Processing {index_i}: starting")

    # Pick a random artifact size within the configured bounds.
    artifact_size = random.randint(config.ARTIFACT_SIZE_LOWER,
                                   config.ARTIFACT_SIZE_UPPER)

    # Round-robin assignment of user, repository and organization.
    u_num = index_i % num_users
    username = f"{USERNAMEPREFIX}{u_num}"
    r_num = index_i % num_repos
    repository = f"repo{r_num}"
    organization = username

    # Retry until the artifact has been created: the benchmark needs every
    # artifact to exist before it can proceed to a known state.
    success = False
    while not success:
        success, benchmark_obj = cloudstash_upload_artifact(
            benchmark, index_i, artifact_size, username, deploy_tokens[u_num],
            repository, organization)

        if success:
            # artifact_data = benchmark_obj["artifact_raw_data"]
            artifact_data = benchmark_obj["payload"]
            if config.VERBOSE:
                log(f"Processing {index_i}: finishing")
            return_queue.put(artifact_data)
Esempio n. 3
0
def GetArtifactNames(benchmark: Benchmark, repository_id: int,
                     return_queue: JoinableQueue):
    """Fetch the artifact names of a repository and push them onto a queue.

    Queries the repository endpoint up to ``config.RETRIES`` times; each
    artifact is enqueued as a ``(repository_id, "group/artifact")`` tuple.

    :param benchmark: benchmark configuration holding the gateway URL
    :param repository_id: id of the repository to enumerate
    :param return_queue: queue that receives the discovered artifact names
    """
    endpoint_url = f"{benchmark.gateway_url}/repository/{repository_id}"
    headers = {"content-type": "application/json"}
    for _ in range(0, config.RETRIES):
        response = requests.get(endpoint_url, headers=headers)

        if response.status_code == 200:
            json_objs = response.json()
            for obj in json_objs['artifacts']:
                return_queue.put(
                    (repository_id,
                     f"{obj['group_name']}/{obj['artifact_name']}"))
            return
        else:
            # Fixed misleading message: this function reads artifact names,
            # it does not create repositories.
            log(f"Failed to get artifact names for repository {repository_id}, waiting {config.RETRY_DELAY}s before trying again.",
                error=True)
            sleep(config.RETRY_DELAY)
Esempio n. 4
0
def GetArtifactId(benchmark: Benchmark, repository_id: int, artifact_name: str,
                  return_queue: JoinableQueue):
    """Look up the id of *artifact_name* and push it onto *return_queue*.

    Retries up to ``config.RETRIES`` times, sleeping ``config.RETRY_DELAY``
    seconds between failed attempts. Always returns None; results are
    delivered through the queue.
    """
    endpoint_url = f"{benchmark.gateway_url}/repository/{repository_id}/artifact/{artifact_name}"
    headers = {"content-type": "application/json"}
    for _ in range(0, config.RETRIES):
        response = requests.get(
            endpoint_url,
            headers=headers,
        )
        if response.status_code != 200:
            # Request failed — report and back off before the next attempt.
            log(
                f"Status code {response.status_code}: Failed to get artifact id for repo {repository_id} artifact {artifact_name}, waiting {config.RETRY_DELAY}s before trying again.",
                error=True,
            )
            sleep(config.RETRY_DELAY)
            continue
        # There should only be a single matching artifact; enqueue each id.
        for obj in response.json():
            return_queue.put(obj['artifactId'])
        return None
    return None
Esempio n. 5
0
    def __init__(self, transitions, name=None):
        """Build the state machine from *transitions* and create its coordinator.

        :param transitions: transition table handed to ``_init_machine``
        :param name: optional machine name; a random string is generated
                     when omitted
        """
        # Cross-process flags for run/interrupt signalling.
        self._running = multiprocessing.Event()
        self.interrupted = multiprocessing.Event()
        self.name = name or Machine.get_random_string()

        # Joinable task queue (maxsize=-1 is effectively unbounded).
        self.tasks = JoinableQueue(maxsize=-1,
                                   ctx=multiprocessing.get_context())
        self.states, self.transitions, self.start_state = self._init_machine(
            transitions)
        # Coordinator drives the machine using the shared task queue.
        self.coordinator = Machine.Coordinator(self.tasks,
                                               self.transitions,
                                               machine=self)
Esempio n. 6
0
 def __init__(self, kvp_file_path=KVP_POOL_FILE_GUEST, event_types=None):
     """Set up the KVP reporting handler and start its publisher thread.

     :param kvp_file_path: path of the Hyper-V KVP pool file to write to
     :param event_types: optional whitelist of event types to report
     """
     super(HyperVKvpReportingHandler, self).__init__()
     self._kvp_file_path = kvp_file_path
     self._event_types = event_types
     # Queue of events waiting to be written to the KVP pool file.
     self.q = JQueue()
     self.kvp_file = None
     # Incarnation number distinguishes pre-boot KVP records from new ones.
     self.incarnation_no = self._get_incarnation_no()
     self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
                                               self.incarnation_no)
     self._current_offset = 0
     # Publish on a daemon thread so reporting never blocks process exit.
     self.publish_thread = threading.Thread(
         target=self._publish_event_routine)
     self.publish_thread.daemon = True
     self.publish_thread.start()
Esempio n. 7
0
 def __init__(self,
              kvp_file_path=KVP_POOL_FILE_GUEST,
              event_types=None):
     """Set up the KVP reporting handler; publishing starts on demand.

     :param kvp_file_path: path of the Hyper-V KVP pool file to write to
     :param event_types: optional whitelist of event types to report
     """
     super(HyperVKvpReportingHandler, self).__init__()
     self._kvp_file_path = kvp_file_path
     self._event_types = event_types
     # Publisher-thread state plus the locks guarding queue and run flag.
     self.running = False
     self.queue_lock = threading.Lock()
     self.running_lock = threading.Lock()
     self.q = JQueue()
     self.kvp_file = None
     # Incarnation number distinguishes pre-boot KVP records from new ones.
     self.incarnation_no = self._get_incarnation_no()
     self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
                                               self.incarnation_no)
     self._current_offset = 0
Esempio n. 8
0
    def __init__(self,
                 kvp_file_path=KVP_POOL_FILE_GUEST,
                 event_types=None):
        """Set up the KVP reporting handler and start its publisher thread.

        Truncates the guest pool file (once per boot) before any events
        are written.

        :param kvp_file_path: path of the Hyper-V KVP pool file to write to
        :param event_types: optional whitelist of event types to report
        """
        super(HyperVKvpReportingHandler, self).__init__()
        self._kvp_file_path = kvp_file_path
        HyperVKvpReportingHandler._truncate_guest_pool_file(
            self._kvp_file_path)

        self._event_types = event_types
        self.q = JQueue()
        # Incarnation number distinguishes pre-boot KVP records from new ones.
        self.incarnation_no = self._get_incarnation_no()
        self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
                                                  self.incarnation_no)
        # Publish on a daemon thread so reporting never blocks process exit.
        self.publish_thread = threading.Thread(
                target=self._publish_event_routine)
        self.publish_thread.daemon = True
        self.publish_thread.start()
Esempio n. 9
0
 def __init__(self, buffers=1, workers=1):
     """Initialise the shared data queue, notify event and worker count.

     :param buffers: capacity passed to the underlying data queue
     :param workers: number of workers this pool will use
     """
     self.workers = workers
     self.status = True
     self.notify = Event()
     self.data_queue = Queue(buffers)
Esempio n. 10
0
def JoinableQueue(maxsize=0):
    """
    Returns a joinable queue object.

    On Python 3, ``multiprocessing.queues.JoinableQueue`` requires a
    keyword-only ``ctx`` argument, so calling it with just ``maxsize``
    raises TypeError. Constructing via the default context binds the
    context for us and keeps the old single-argument interface working.

    :param maxsize: maximum number of items the queue may hold
                    (0 means unbounded)
    """
    import multiprocessing
    return multiprocessing.get_context().JoinableQueue(maxsize)
Esempio n. 11
0
    def parallel(self, parsets, cpus=1, workdir_base=None, save=True,
                reuse_dirs=False, indices=None, verbose=True, logfile=None):
        """Run the model over *parsets* in parallel child processes.

        Spawns one worker process per cpu (or per host processor when
        ``cpus`` is a host->processors dict), feeds parameter sets through a
        joinable work queue, and collects responses from a results queue.

        :param parsets: parameter sets to run (numpy array or list)
        :param cpus: int worker count, or dict mapping hostnames to
                     processor lists
        :param workdir_base: base name for per-run working directories
        :param save: passed through to the child workers
        :param reuse_dirs: reuse existing working directories when True
        :param indices: sample indices paired with each parameter set
        :param verbose: print a progress table to stdout
        :param logfile: optional path of a file receiving the same table
        :returns: tuple of (results array or None, parsets)

        NOTE(review): ``not os.name is "posix"`` compares string identity,
        not equality — works only by CPython interning; should be ``!=``.
        """

        if not os.name is "posix":
            # Use freeze_support for PCs
            freeze_support()

        # Determine if using working directories or not
        saved_workdir = self.workdir # Save workdir to reset after parallel run
        if not workdir_base is None: self.workdir_base = workdir_base
        if self.workdir_base is None: self.workdir = None

        #if len(hosts) > 0:
        if isinstance( cpus, dict):
            hosts = cpus
            cpus = sum([len(v) for v in hosts.values()])
            processors = [v for l in hosts.values() for v in l]
            hostnames = [k for k,v in hosts.items() for n in v]
            self.cpus = hosts
        elif isinstance(self.cpus,dict) and len(self.cpus) > 0:
            hosts = self.cpus
            cpus = sum([len(v) for v in hosts.values()])
            processors = [v for l in hosts.values() for v in l]
            hostnames = [k for k,v in hosts.items() for n in v]
        elif isinstance(cpus, int):
            hostnames = [None]*cpus
            processors = [None]*cpus
        else:
            print "Error: cpus argument is neither an integer nor a dictionary!"
            return

        # Determine number of samples and adjust cpus if samples < cpus requested
        if isinstance( parsets, numpy.ndarray ): n = parsets.shape[0]
        elif isinstance( parsets, list ): n = len(parsets)
        if n < cpus: cpus = n

        # Start cpus model runs
        resultsq = Queue()
        work = JoinableQueue()
        pool = []
        for i in range(cpus):
            p = Process(target=self.child, args=(work, resultsq, reuse_dirs, save, hostnames[i],processors[i]))
            p.daemon = True
            p.start()
            pool.append(p)

        # Queue every (parset, sample-index, list-index) triple, followed by
        # one '' sentinel per worker so each child knows when to stop.
        iter_args = itertools.chain( parsets, ('',)*cpus )
        iter_smpind = itertools.chain( indices, ('',)*cpus )
        iter_lstind = itertools.chain( range(len(parsets)), ('',)*cpus )
        for item in zip(iter_args,iter_smpind,iter_lstind):
            work.put(item)
        
        if verbose or logfile: 
            if logfile: f = open(logfile, 'w')
            s = "%-8s" % 'index'
            for nm in self.parnames:
                s += " %16s" % nm
            header = True

        # Collect one response per parameter set; string responses are
        # error messages, OrderedDict responses are simulated values.
        results = [[numpy.NAN]]*len(parsets)
        for i in range(len(parsets)):
            lst_ind, smp_ind, resp = resultsq.get()
            if isinstance( resp, str):
                if logfile: 
                    f.write(resp+'\n')
                    f.flush()
            else:
                if isinstance( resp, OrderedDict):
                    self._set_simvalues(resp)
                    results[lst_ind] = resp.values()
                if verbose or logfile:
                    if header:
                        for nm in self.obsnames:
                            s += " %16s" % nm
                        s += '\n'
                        if verbose: print s,
                        if logfile: 
                            f.write( s )
                            f.flush()
                        header = False
                    s = "%-8d" % smp_ind
                    for v in parsets[lst_ind]:
                        s += " %16lf" % v
                    if results[lst_ind] is not numpy.NAN:
                        for v in results[lst_ind]:
                            s += " %16lf" % v
                    s += '\n'
                    if verbose: print s,
                    if logfile: 
                        f.write( s )
                        f.flush()
        if logfile: f.close()

        # Pad failed runs with NaNs so the results array is rectangular.
        for i in range(len(results)):
            if results[i] is numpy.NAN:
                if len(self.obs) > 0:
                    results[i] = [numpy.NAN]*len(self.obs)

        for p in pool:
            p.join()

        # Clean parent
        self.workdir = saved_workdir
        results = numpy.array(results)
        if results.shape[1] == 1:
            if all(numpy.isnan(r[0]) for r in results):
                results = None

        return results, parsets   
Esempio n. 12
0
class HyperVKvpReportingHandler(ReportingHandler):
    """
    Reports events to a Hyper-V host using Key-Value-Pair exchange protocol
    and can be used to obtain high level diagnostic information from the host.

    To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be
    running. It reads the kvp_file when the host requests the guest to
    enumerate the KVP's.

    This reporter collates all events for a module (origin|name) in a single
    json string in the dictionary.

    For more information, see
    https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
    """
    # Fixed record layout of the KVP pool file: 512-byte key followed by a
    # 2048-byte value, both NUL-padded.
    HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
    HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
    HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
                          HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
    EVENT_PREFIX = 'CLOUD_INIT'
    MSG_KEY = 'msg'
    RESULT_KEY = 'result'
    DESC_IDX_KEY = 'msg_i'
    JSON_SEPARATORS = (',', ':')
    KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'

    def __init__(self,
                 kvp_file_path=KVP_POOL_FILE_GUEST,
                 event_types=None):
        """Set up the handler; a publisher thread is started on demand.

        :param kvp_file_path: path of the Hyper-V KVP pool file to write to
        :param event_types: optional whitelist of event types to report
        """
        super(HyperVKvpReportingHandler, self).__init__()
        self._kvp_file_path = kvp_file_path
        self._event_types = event_types
        # Publisher-thread state plus the locks guarding queue and run flag.
        self.running = False
        self.queue_lock = threading.Lock()
        self.running_lock = threading.Lock()
        self.q = JQueue()
        self.kvp_file = None
        # Incarnation number distinguishes pre-boot KVP records from new ones.
        self.incarnation_no = self._get_incarnation_no()
        self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
                                                  self.incarnation_no)
        self._current_offset = 0

    def _get_incarnation_no(self):
        """
        use the time passed as the incarnation number.
        the incarnation number is the number which are used to
        distinguish the old data stored in kvp and the new data.
        """
        uptime_str = util.uptime()
        try:
            # Boot time (now minus uptime) serves as the incarnation number.
            return int(time.time() - float(uptime_str))
        except ValueError:
            LOG.warning("uptime '%s' not in correct format.", uptime_str)
            return 0

    def _iterate_kvps(self, offset):
        """iterate the kvp file from the current offset."""
        try:
            with open(self._kvp_file_path, 'rb+') as f:
                # Expose the open handle so _update_kvp_item can write back
                # in place while iterating.
                self.kvp_file = f
                fcntl.flock(f, fcntl.LOCK_EX)
                f.seek(offset)
                record_data = f.read(self.HV_KVP_RECORD_SIZE)
                while len(record_data) == self.HV_KVP_RECORD_SIZE:
                    self._current_offset += self.HV_KVP_RECORD_SIZE
                    kvp_item = self._decode_kvp_item(record_data)
                    yield kvp_item
                    record_data = f.read(self.HV_KVP_RECORD_SIZE)
                fcntl.flock(f, fcntl.LOCK_UN)
        finally:
            self.kvp_file = None

    def _event_key(self, event):
        """
        the event key format is:
        CLOUD_INIT|<incarnation number>|<event_type>|<event_name>
        """
        return u"{0}|{1}|{2}".format(self.event_key_prefix,
                                     event.event_type, event.name)

    def _encode_kvp_item(self, key, value):
        """Pack key and value into one fixed-size, NUL-padded KVP record."""
        data = (struct.pack("%ds%ds" % (
                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
                key.encode('utf-8'), value.encode('utf-8')))
        return data

    def _decode_kvp_item(self, record_data):
        """Unpack one fixed-size record into a {'key', 'value'} dict."""
        record_data_len = len(record_data)
        if record_data_len != self.HV_KVP_RECORD_SIZE:
            raise ReportException(
                "record_data len not correct {0} {1}."
                .format(record_data_len, self.HV_KVP_RECORD_SIZE))
        k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
                                                             .strip('\x00'))
        v = (
            record_data[
                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
                ].decode('utf-8').strip('\x00'))

        return {'key': k, 'value': v}

    def _update_kvp_item(self, record_data):
        """Overwrite the record most recently yielded by _iterate_kvps."""
        if self.kvp_file is None:
            raise ReportException(
                "kvp file '{0}' not opened."
                .format(self._kvp_file_path))
        # Step back one record from the iterator's position, then rewrite.
        self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1)
        self.kvp_file.write(record_data)

    def _append_kvp_item(self, record_data):
        """Append one encoded record to the pool file under an exclusive lock."""
        with open(self._kvp_file_path, 'rb+') as f:
            fcntl.flock(f, fcntl.LOCK_EX)
            # seek to end of the file
            f.seek(0, 2)
            f.write(record_data)
            f.flush()
            fcntl.flock(f, fcntl.LOCK_UN)
            self._current_offset = f.tell()

    def _break_down(self, key, meta_data, description):
        """Split an oversized event into several records, slicing the
        description so each record's JSON fits the KVP value limit."""
        del meta_data[self.MSG_KEY]
        des_in_json = json.dumps(description)
        # Strip the surrounding quotes of the JSON-encoded description.
        des_in_json = des_in_json[1:(len(des_in_json) - 1)]
        i = 0
        result_array = []
        message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
        while True:
            meta_data[self.DESC_IDX_KEY] = i
            meta_data[self.MSG_KEY] = ''
            data_without_desc = json.dumps(meta_data,
                                           separators=self.JSON_SEPARATORS)
            room_for_desc = (
                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
                len(data_without_desc) - 8)
            value = data_without_desc.replace(
                message_place_holder,
                '"{key}":"{desc}"'.format(
                    key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
            result_array.append(self._encode_kvp_item(key, value))
            i += 1
            des_in_json = des_in_json[room_for_desc:]
            if len(des_in_json) == 0:
                break
        return result_array

    def _encode_event(self, event):
        """
        encode the event into kvp data bytes.
        if the event content reaches the maximum length of kvp value.
        then it would be cut to multiple slices.
        """
        key = self._event_key(event)
        meta_data = {
                "name": event.name,
                "type": event.event_type,
                "ts": (datetime.utcfromtimestamp(event.timestamp)
                       .isoformat() + 'Z'),
                }
        if hasattr(event, self.RESULT_KEY):
            meta_data[self.RESULT_KEY] = event.result
        meta_data[self.MSG_KEY] = event.description
        value = json.dumps(meta_data, separators=self.JSON_SEPARATORS)
        # if it reaches the maximum length of kvp value,
        # break it down to slices.
        # this should be very corner case.
        if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
            return self._break_down(key, meta_data, event.description)
        else:
            data = self._encode_kvp_item(key, value)
            return [data]

    def _publish_event_routine(self):
        """Drain the event queue, writing each event to the KVP pool file.

        Stale records from a previous boot (lower incarnation number) are
        overwritten in place; otherwise new records are appended.
        """
        while True:
            event = None
            try:
                # Pull the next event; raises queue.Empty when none pending.
                event = self.q.get_nowait()
                need_append = True
                try:
                    if not os.path.exists(self._kvp_file_path):
                        # NOTE(review): this warns about skipping but does
                        # not actually skip; the write below will raise
                        # IOError instead — confirm intent.
                        LOG.warning(
                            "skip writing events %s to %s. file not present.",
                            event.as_string(),
                            self._kvp_file_path)
                    encoded_event = self._encode_event(event)
                    # for each encoded_event
                    for encoded_data in (encoded_event):
                        for kvp in self._iterate_kvps(self._current_offset):
                            match = (
                                re.match(
                                    r"^{0}\|(\d+)\|.+"
                                    .format(self.EVENT_PREFIX),
                                    kvp['key']
                                ))
                            if match:
                                match_groups = match.groups(0)
                                if int(match_groups[0]) < self.incarnation_no:
                                    need_append = False
                                    self._update_kvp_item(encoded_data)
                                    break
                        if need_append:
                            self._append_kvp_item(encoded_data)
                except IOError as e:
                    LOG.warning(
                        "failed posting event to kvp: %s e:%s",
                        event.as_string(), e)
                    self.running = False
                    break
                finally:
                    self.q.task_done()
            except queue.Empty:
                with self.queue_lock:
                    # double check the queue is empty
                    if self.q.empty():
                        self.running = False
                        break

    def trigger_publish_event(self):
        """Start a publisher thread if one is not already running."""
        if not self.running:
            with self.running_lock:
                # Double-checked under the lock so only one thread starts.
                if not self.running:
                    self.running = True
                    thread = threading.Thread(
                        target=self._publish_event_routine)
                    thread.start()

    # since the saving to the kvp pool can be a time costing task
    # if the kvp pool already contains a chunk of data,
    # so defer it to another thread.
    def publish_event(self, event):
        """Queue *event* for publishing if its type passes the filter."""
        if (not self._event_types or event.event_type in self._event_types):
            with self.queue_lock:
                self.q.put(event)
            self.trigger_publish_event()
Esempio n. 13
0
 def __setstate__(self, state):
     """Restore pickled state; the trailing element carries ``_cursize``."""
     JoinableQueue.__setstate__(self, state[:-1])
     self._cursize, = state[-1:]
Esempio n. 14
0
 def __getstate__(self):
     """Pickle the base queue's state with ``_cursize`` appended."""
     return JoinableQueue.__getstate__(self) + (self._cursize, )
Esempio n. 15
0
class HyperVKvpReportingHandler(ReportingHandler):
    """
    Reports events to a Hyper-V host using Key-Value-Pair exchange protocol
    and can be used to obtain high level diagnostic information from the host.

    To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be
    running. It reads the kvp_file when the host requests the guest to
    enumerate the KVP's.

    This reporter collates all events for a module (origin|name) in a single
    json string in the dictionary.

    For more information, see
    https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
    """
    # Fixed record layout of the KVP pool file: 512-byte key followed by a
    # 2048-byte value, both NUL-padded.
    HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
    HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
    HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
                          HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
    EVENT_PREFIX = 'CLOUD_INIT'
    MSG_KEY = 'msg'
    RESULT_KEY = 'result'
    DESC_IDX_KEY = 'msg_i'
    JSON_SEPARATORS = (',', ':')
    KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
    # Class-level flag: the pool file is truncated at most once per process.
    _already_truncated_pool_file = False

    def __init__(self,
                 kvp_file_path=KVP_POOL_FILE_GUEST,
                 event_types=None):
        """Set up the handler and start its daemon publisher thread.

        Truncates the guest pool file (once per boot) before any events
        are written.

        :param kvp_file_path: path of the Hyper-V KVP pool file to write to
        :param event_types: optional whitelist of event types to report
        """
        super(HyperVKvpReportingHandler, self).__init__()
        self._kvp_file_path = kvp_file_path
        HyperVKvpReportingHandler._truncate_guest_pool_file(
            self._kvp_file_path)

        self._event_types = event_types
        self.q = JQueue()
        # Incarnation number distinguishes pre-boot KVP records from new ones.
        self.incarnation_no = self._get_incarnation_no()
        self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
                                                  self.incarnation_no)
        # Publish on a daemon thread so reporting never blocks process exit.
        self.publish_thread = threading.Thread(
                target=self._publish_event_routine)
        self.publish_thread.daemon = True
        self.publish_thread.start()

    @classmethod
    def _truncate_guest_pool_file(cls, kvp_file):
        """
        Truncate the pool file if it has not been truncated since boot.
        This should be done exactly once for the file indicated by
        KVP_POOL_FILE_GUEST constant above. This method takes a filename
        so that we can use an arbitrary file during unit testing.
        Since KVP is a best-effort telemetry channel we only attempt to
        truncate the file once and only if the file has not been modified
        since boot. Additional truncation can lead to loss of existing
        KVPs.
        """
        if cls._already_truncated_pool_file:
            return
        boot_time = time.time() - float(util.uptime())
        try:
            if os.path.getmtime(kvp_file) < boot_time:
                with open(kvp_file, "w"):
                    pass
        except (OSError, IOError) as e:
            LOG.warning("failed to truncate kvp pool file, %s", e)
        finally:
            cls._already_truncated_pool_file = True

    def _get_incarnation_no(self):
        """
        use the time passed as the incarnation number.
        the incarnation number is the number which are used to
        distinguish the old data stored in kvp and the new data.
        """
        uptime_str = util.uptime()
        try:
            # Boot time (now minus uptime) serves as the incarnation number.
            return int(time.time() - float(uptime_str))
        except ValueError:
            LOG.warning("uptime '%s' not in correct format.", uptime_str)
            return 0

    def _iterate_kvps(self, offset):
        """iterate the kvp file from the current offset."""
        with open(self._kvp_file_path, 'rb') as f:
            fcntl.flock(f, fcntl.LOCK_EX)
            f.seek(offset)
            record_data = f.read(self.HV_KVP_RECORD_SIZE)
            while len(record_data) == self.HV_KVP_RECORD_SIZE:
                kvp_item = self._decode_kvp_item(record_data)
                yield kvp_item
                record_data = f.read(self.HV_KVP_RECORD_SIZE)
            fcntl.flock(f, fcntl.LOCK_UN)

    def _event_key(self, event):
        """
        the event key format is:
        CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<uuid>
        (the trailing uuid keeps every key unique)
        """
        return u"{0}|{1}|{2}|{3}".format(self.event_key_prefix,
                                         event.event_type, event.name,
                                         uuid.uuid4())

    def _encode_kvp_item(self, key, value):
        """Pack key and value into one fixed-size, NUL-padded KVP record."""
        data = (struct.pack("%ds%ds" % (
                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
                key.encode('utf-8'), value.encode('utf-8')))
        return data

    def _decode_kvp_item(self, record_data):
        """Unpack one fixed-size record into a {'key', 'value'} dict."""
        record_data_len = len(record_data)
        if record_data_len != self.HV_KVP_RECORD_SIZE:
            raise ReportException(
                "record_data len not correct {0} {1}."
                .format(record_data_len, self.HV_KVP_RECORD_SIZE))
        k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
                                                             .strip('\x00'))
        v = (
            record_data[
                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
                ].decode('utf-8').strip('\x00'))

        return {'key': k, 'value': v}

    def _append_kvp_item(self, record_data):
        """Append encoded records to the pool file under an exclusive lock."""
        with open(self._kvp_file_path, 'ab') as f:
            fcntl.flock(f, fcntl.LOCK_EX)
            for data in record_data:
                f.write(data)
            f.flush()
            fcntl.flock(f, fcntl.LOCK_UN)

    def _break_down(self, key, meta_data, description):
        """Split an oversized event into several records, slicing the
        description so each record's JSON fits the KVP value limit."""
        del meta_data[self.MSG_KEY]
        des_in_json = json.dumps(description)
        # Strip the surrounding quotes of the JSON-encoded description.
        des_in_json = des_in_json[1:(len(des_in_json) - 1)]
        i = 0
        result_array = []
        message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
        while True:
            meta_data[self.DESC_IDX_KEY] = i
            meta_data[self.MSG_KEY] = ''
            data_without_desc = json.dumps(meta_data,
                                           separators=self.JSON_SEPARATORS)
            room_for_desc = (
                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
                len(data_without_desc) - 8)
            value = data_without_desc.replace(
                message_place_holder,
                '"{key}":"{desc}"'.format(
                    key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
            result_array.append(self._encode_kvp_item(key, value))
            i += 1
            des_in_json = des_in_json[room_for_desc:]
            if len(des_in_json) == 0:
                break
        return result_array

    def _encode_event(self, event):
        """
        encode the event into kvp data bytes.
        if the event content reaches the maximum length of kvp value.
        then it would be cut to multiple slices.
        """
        key = self._event_key(event)
        meta_data = {
                "name": event.name,
                "type": event.event_type,
                "ts": (datetime.utcfromtimestamp(event.timestamp)
                       .isoformat() + 'Z'),
                }
        if hasattr(event, self.RESULT_KEY):
            meta_data[self.RESULT_KEY] = event.result
        meta_data[self.MSG_KEY] = event.description
        value = json.dumps(meta_data, separators=self.JSON_SEPARATORS)
        # if it reaches the maximum length of kvp value,
        # break it down to slices.
        # this should be very corner case.
        if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
            return self._break_down(key, meta_data, event.description)
        else:
            data = self._encode_kvp_item(key, value)
            return [data]

    def _publish_event_routine(self):
        """Batch-drain the event queue and append all records in one write."""
        while True:
            items_from_queue = 0
            try:
                event = self.q.get(block=True)
                items_from_queue += 1
                encoded_data = []
                # Coalesce every pending event into a single file append.
                while event is not None:
                    encoded_data += self._encode_event(event)
                    try:
                        # get all the rest of the events in the queue
                        event = self.q.get(block=False)
                        items_from_queue += 1
                    except QueueEmptyError:
                        event = None
                try:
                    self._append_kvp_item(encoded_data)
                except (OSError, IOError) as e:
                    LOG.warning("failed posting events to kvp, %s", e)
                finally:
                    # Mark every drained event done so q.join() can return.
                    for _ in range(items_from_queue):
                        self.q.task_done()
            # when main process exits, q.get() will throw EOFError
            # indicating we should exit this thread.
            except EOFError:
                return

    # since the saving to the kvp pool can be a time costing task
    # if the kvp pool already contains a chunk of data,
    # so defer it to another thread.
    def publish_event(self, event):
        """Queue *event* for publishing if its type passes the filter."""
        if not self._event_types or event.event_type in self._event_types:
            self.q.put(event)

    def flush(self):
        """Block until the publisher thread has drained the event queue."""
        LOG.debug('HyperVReportingHandler flushing remaining events')
        self.q.join()
def JoinableQueue(maxsize=0):
    """Return a joinable multiprocessing queue.

    On Python 3, ``multiprocessing.queues.JoinableQueue`` requires a
    keyword-only ``ctx`` argument, so calling it with only ``maxsize``
    raises TypeError. Constructing the queue through the default context
    binds the context for us while keeping the old interface.

    :param maxsize: maximum number of items the queue may hold
                    (0 means unbounded)
    """
    import multiprocessing
    return multiprocessing.get_context().JoinableQueue(maxsize)
Esempio n. 17
0
            if (datetime.now() - period_start) >= self.period:
                try:
                    self.conf_check()
                except:
                    logger.debug("Exception in agent watchdog %s", sys.exc_info()[1])
                # Start a new period
                period_start = datetime.now()
            # Polling for shutdown must be fast
            time.sleep(1)
            try:
                os.kill(self.parent_pid, 0)
            except:
                logger.debug("Parent process killed, suicide this agent")
                logging.shutdown()
                # Rage quit should be universal
                os.kill(os.getpid(), signal.SIGKILL)
                return


if __name__ == "__main__":
    """ Use this when debugging agents """
    # Configure logging from the agentcluster config file; mirror pysnmp
    # debug output when DEBUG is enabled for its logger.
    logging.config.fileConfig("../scripts/agentcluster-log.conf")
    pysnmplogger = logging.getLogger("pysnmp")
    if pysnmplogger.isEnabledFor(logging.DEBUG):
        debug.setLogger(debug.Debug("all"))
    logger.info("Test agent alone")
    # Hand the agent a start token through a joinable queue, then run the
    # agent in the current process against a sample agent definition.
    tokens_start = JoinableQueue()
    tokens_start.put(object())
    agent = Agent("../tests/agents/windows/windows.agent", tokens_start, None, 0)
    agent.run()
Esempio n. 18
0
class JobQueue(JoinableQueue):
    """A joinable queue that owns a pool of background workers consuming
    jobs from it. Workers may be either processes or threads; choose the
    flavour matching the workload (processes for CPU bound jobs, threads
    for I/O bound ones).

    Attributes
    ----------
    :type num_workers: int or 4
    :param num_workers: How many worker threads/processes to spawn for
                        executing incoming jobs

    :type start_method: str or 'fork'
    :param start_method: Start method used to build the multiprocessing
                         context handed to the JoinableQueue parent class

    :type worker_class: worker.Worker
    :param worker_class: Class instantiated for every worker of the pool

    """
    def __init__(
        self,
        num_workers=4,
        start_method="fork",
        worker_class=ProcessQueueExecutor,
    ):
        # Build the multiprocessing context for the requested start method;
        # the JoinableQueue base class requires it explicitly.
        context = get_context(start_method)
        super().__init__(ctx=context)
        # Pool size.
        self._num_workers = num_workers
        # Finished jobs are pushed here and drained by the gatherer thread.
        self._completed_jobs = JoinableQueue(ctx=context)
        # Worker implementation, process- or thread-based.
        self._workerclass = worker_class
        # job_id -> Future, resolved once the matching result is gathered.
        self._results = {}
        gatherer = threading.Thread(
            target=gather_results,
            args=(self._results, self._completed_jobs),
            daemon=True,
        )
        gatherer.start()
        # Bring the worker pool up.
        self.start_workers()

    @property
    def num_workers(self):
        """Number of workers in the pool."""
        return self._num_workers

    def add_job(self, job):
        """Enqueue a job for execution and return a Future for its result.

        Args:
        -----
        :type job: tasq.Job
        :param job: The `tasq.Job` object containing the function to be
                    executed
        """
        # Accept both serialized (bytes) and live job objects; the job_id is
        # needed either way to register the pending Future.
        obj = serde.loads(job) if isinstance(job, bytes) else job
        future = Future()
        self._results[obj.job_id] = future
        self.put(job)
        return future

    def shutdown(self):
        """Send the sentinel that stops the result-gathering thread."""
        self._completed_jobs.put(None)

    def start_workers(self):
        """Instantiate and start every worker of the pool."""
        for _ in range(self.num_workers):
            worker = self._workerclass(self, self._completed_jobs)
            worker.start()

    def route(self, job):
        """Alias of :meth:`add_job`."""
        return self.add_job(job)
Esempio n. 19
0
class AsyncIO(object):
    """Run a data-producing callback in background processes, buffering its
    output through a bounded queue.

    The instance is used as a decorator-like callable: ``AsyncIO(...)(func)``
    spawns the worker processes and returns ``self``; consumers then pull
    items with :meth:`get` and stop production with :meth:`end`.
    """
    def __init__(self,buffers=1,workers=1):
        # Bounded queue: producers block once `buffers` items are pending.
        self.data_queue = Queue(buffers)
        # Event used to ask the producer loops to stop.
        self.notify = Event()
        self.status = True
        # NOTE(review): initially the worker *count*; __call__ replaces this
        # with the list of Process objects.
        self.workers = workers

    def wrapper(self,func):
        """Producer loop executed in each worker process.

        Dispatches on the callback's arity (as reported by
        ``_IO.callback_type``) and pushes ``(a, b)`` pairs onto the queue
        until :meth:`end` sets the notify event.

        NOTE(review): a TypeError from the callback is treated as the
        end-of-epoch signal and a 1-tuple ``(epoch,)`` sentinel is queued —
        presumably the callback raises when its batch index runs past the
        data; confirm against the callback contract. Also note the 1-arg
        branch never increments ``epoch`` (sentinel is always ``(0,)``),
        unlike the 2-arg and 0-arg branches — verify this asymmetry is
        intentional.
        """
        queue = self.data_queue
        notify = self.notify
        epoch = batch = 0
        # Number of positional arguments the callback expects (0, 1 or 2).
        n_arg = _IO.callback_type(func)
        if n_arg==2:
            # Callback wants (epoch, batch).
            while not notify.is_set():
                try:
                    a,b=func(epoch,batch)
                    queue.put((a,b))
                    batch += 1
                except TypeError:
                    epoch+=1
                    batch =0
                    queue.put((epoch,))
        if n_arg==1:
            # Callback wants (batch) only.
             while not notify.is_set():
                try:
                    a,b=func(batch)
                    queue.put((a,b))
                    batch += 1
                except TypeError:
                    batch =0
                    queue.put((epoch,))
        if n_arg==0:
            # Callback takes no arguments.
             while not notify.is_set():
                try:
                    a,b=func()
                    queue.put((a,b))
                    batch += 1
                except TypeError:
                    epoch+=1
                    batch =0
                    queue.put((epoch,))

    def get(self):
        """Pop the next produced item (blocks while the queue is empty)."""
        return self.data_queue.get()
    def end(self):
        """Stop production: set the notify event, then drain one pending item
        per live worker so producers blocked on a full queue can observe the
        event and exit."""
        self.notify.set()
        for worker in self.workers:
            if worker.is_alive():
                try:
                    _=self.data_queue.get_nowait()
                except Empty:
                    pass


    def __call__(self,callback):
        """Spawn ``self.workers`` daemon processes running ``wrapper`` over
        *callback* and return ``self`` for chained use."""
        workers=[Process(target=self.wrapper,
                        args=(callback,)) for _ in range(self.workers)]
        for worker in workers:
            # Daemonized so stray producers never block interpreter exit.
            worker.daemon = True
        self.workers = workers

        for worker in workers:
            worker.start()
        return self
Esempio n. 20
0
class HyperVKvpReportingHandler(ReportingHandler):
    """
    Reports events to a Hyper-V host using Key-Value-Pair exchange protocol
    and can be used to obtain high level diagnostic information from the host.

    To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be
    running. It reads the kvp_file when the host requests the guest to
    enumerate the KVP's.

    This reporter collates all events for a module (origin|name) in a single
    json string in the dictionary.

    For more information, see
    https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
    """
    # Fixed-size record layout of the KVP pool file: a NUL-padded key field
    # followed by a NUL-padded value field.
    HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
    HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
    HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
                          HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
    EVENT_PREFIX = 'CLOUD_INIT'
    MSG_KEY = 'msg'
    RESULT_KEY = 'result'
    # Slice index key used when a long description spans several records.
    DESC_IDX_KEY = 'msg_i'
    # Compact separators so the JSON payload wastes no record bytes.
    JSON_SEPARATORS = (',', ':')
    KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
    # Class-level flag: the pool file is truncated at most once per process.
    _already_truncated_pool_file = False

    def __init__(self,
                 kvp_file_path=KVP_POOL_FILE_GUEST,
                 event_types=None):
        """Create the handler and start the background publisher thread.

        :param kvp_file_path: path of the guest KVP pool file appended to.
        :param event_types: optional iterable of event types to report;
                            a falsy value means report every event.
        """
        super(HyperVKvpReportingHandler, self).__init__()
        self._kvp_file_path = kvp_file_path
        HyperVKvpReportingHandler._truncate_guest_pool_file(
            self._kvp_file_path)

        self._event_types = event_types
        self.q = JQueue()
        self.incarnation_no = self._get_incarnation_no()
        self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
                                                  self.incarnation_no)
        # Daemon thread: a stuck KVP write must not block interpreter exit.
        self.publish_thread = threading.Thread(
                target=self._publish_event_routine)
        self.publish_thread.daemon = True
        self.publish_thread.start()

    @classmethod
    def _truncate_guest_pool_file(cls, kvp_file):
        """
        Truncate the pool file if it has not been truncated since boot.
        This should be done exactly once for the file indicated by
        KVP_POOL_FILE_GUEST constant above. This method takes a filename
        so that we can use an arbitrary file during unit testing.
        Since KVP is a best-effort telemetry channel we only attempt to
        truncate the file once and only if the file has not been modified
        since boot. Additional truncation can lead to loss of existing
        KVPs.
        """
        if cls._already_truncated_pool_file:
            return
        boot_time = time.time() - float(util.uptime())
        try:
            if os.path.getmtime(kvp_file) < boot_time:
                with open(kvp_file, "w"):
                    pass
        except (OSError, IOError) as e:
            LOG.warning("failed to truncate kvp pool file, %s", e)
        finally:
            # Best-effort channel: never retry, even after a failure.
            cls._already_truncated_pool_file = True

    def _get_incarnation_no(self):
        """
        use the time passed as the incarnation number.
        the incarnation number is the number which are used to
        distinguish the old data stored in kvp and the new data.
        """
        uptime_str = util.uptime()
        try:
            # Boot time (epoch seconds) doubles as the incarnation number.
            return int(time.time() - float(uptime_str))
        except ValueError:
            LOG.warning("uptime '%s' not in correct format.", uptime_str)
            return 0

    def _iterate_kvps(self, offset):
        """iterate the kvp file from the current offset."""
        with open(self._kvp_file_path, 'rb') as f:
            # Exclusive lock so the KVP daemon cannot interleave access.
            fcntl.flock(f, fcntl.LOCK_EX)
            f.seek(offset)
            record_data = f.read(self.HV_KVP_RECORD_SIZE)
            # Stop on a short read (end of file / partial trailing record).
            while len(record_data) == self.HV_KVP_RECORD_SIZE:
                kvp_item = self._decode_kvp_item(record_data)
                yield kvp_item
                record_data = f.read(self.HV_KVP_RECORD_SIZE)
            fcntl.flock(f, fcntl.LOCK_UN)

    def _event_key(self, event):
        """
        the event key format is:
        CLOUD_INIT|<incarnation number>|<event_type>|<event_name>
        """
        return u"{0}|{1}|{2}".format(self.event_key_prefix,
                                     event.event_type, event.name)

    def _encode_kvp_item(self, key, value):
        """Pack *key* and *value* into one fixed-size KVP record; struct's
        's' format NUL-pads each field to its declared width."""
        data = (struct.pack("%ds%ds" % (
                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
                key.encode('utf-8'), value.encode('utf-8')))
        return data

    def _decode_kvp_item(self, record_data):
        """Split one fixed-size record back into its key and value strings,
        stripping the NUL padding; raises ReportException on a short/long
        record."""
        record_data_len = len(record_data)
        if record_data_len != self.HV_KVP_RECORD_SIZE:
            raise ReportException(
                "record_data len not correct {0} {1}."
                .format(record_data_len, self.HV_KVP_RECORD_SIZE))
        k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
                                                             .strip('\x00'))
        v = (
            record_data[
                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
                ].decode('utf-8').strip('\x00'))

        return {'key': k, 'value': v}

    def _append_kvp_item(self, record_data):
        """Append the encoded records to the pool file under an exclusive
        lock, flushing before the lock is released."""
        with open(self._kvp_file_path, 'ab') as f:
            fcntl.flock(f, fcntl.LOCK_EX)
            for data in record_data:
                f.write(data)
            f.flush()
            fcntl.flock(f, fcntl.LOCK_UN)

    def _break_down(self, key, meta_data, description):
        """Split an event whose JSON payload exceeds the maximum KVP value
        size into several records, each carrying one slice of the description
        plus a DESC_IDX_KEY slice index.

        NOTE(review): the 8-byte slack subtracted from room_for_desc is
        presumably headroom for JSON quoting/escaping of the slice — confirm
        against the KVP value-size limit before changing it.
        """
        del meta_data[self.MSG_KEY]
        des_in_json = json.dumps(description)
        # Drop the surrounding quotes of the JSON-encoded string.
        des_in_json = des_in_json[1:(len(des_in_json) - 1)]
        i = 0
        result_array = []
        message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
        while True:
            meta_data[self.DESC_IDX_KEY] = i
            meta_data[self.MSG_KEY] = ''
            data_without_desc = json.dumps(meta_data,
                                           separators=self.JSON_SEPARATORS)
            room_for_desc = (
                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
                len(data_without_desc) - 8)
            # Substitute the empty msg placeholder with the current slice.
            value = data_without_desc.replace(
                message_place_holder,
                '"{key}":"{desc}"'.format(
                    key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
            result_array.append(self._encode_kvp_item(key, value))
            i += 1
            des_in_json = des_in_json[room_for_desc:]
            if len(des_in_json) == 0:
                break
        return result_array

    def _encode_event(self, event):
        """
        encode the event into kvp data bytes.
        if the event content reaches the maximum length of kvp value.
        then it would be cut to multiple slices.
        """
        key = self._event_key(event)
        meta_data = {
                "name": event.name,
                "type": event.event_type,
                "ts": (datetime.utcfromtimestamp(event.timestamp)
                       .isoformat() + 'Z'),
                }
        if hasattr(event, self.RESULT_KEY):
            meta_data[self.RESULT_KEY] = event.result
        meta_data[self.MSG_KEY] = event.description
        value = json.dumps(meta_data, separators=self.JSON_SEPARATORS)
        # if it reaches the maximum length of kvp value,
        # break it down to slices.
        # this should be very corner case.
        if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
            return self._break_down(key, meta_data, event.description)
        else:
            data = self._encode_kvp_item(key, value)
            return [data]

    def _publish_event_routine(self):
        """Background loop: block for one event, opportunistically drain the
        rest of the queue, then append the whole batch to the pool file in a
        single locked write."""
        while True:
            items_from_queue = 0
            try:
                event = self.q.get(block=True)
                items_from_queue += 1
                encoded_data = []
                while event is not None:
                    encoded_data += self._encode_event(event)
                    try:
                        # get all the rest of the events in the queue
                        event = self.q.get(block=False)
                        items_from_queue += 1
                    except QueueEmptyError:
                        event = None
                try:
                    self._append_kvp_item(encoded_data)
                except (OSError, IOError) as e:
                    LOG.warning("failed posting events to kvp, %s", e)
                finally:
                    # Mark every consumed event done so flush()'s join()
                    # returns even when the write failed.
                    for _ in range(items_from_queue):
                        self.q.task_done()
            # when main process exits, q.get() will through EOFError
            # indicating we should exit this thread.
            except EOFError:
                return

    # since the saving to the kvp pool can be a time costing task
    # if the kvp pool already contains a chunk of data,
    # so defer it to another thread.
    def publish_event(self, event):
        """Queue *event* for the background writer if its type passes the
        configured filter (a falsy filter accepts everything)."""
        if not self._event_types or event.event_type in self._event_types:
            self.q.put(event)

    def flush(self):
        """Block until the background thread has drained the queue."""
        LOG.debug('HyperVReportingHandler flushing remaining events')
        self.q.join()
Esempio n. 21
0
    def run(self):
        """Main loop of the agent process.

        Waits for the start token, configures a pysnmp SNMP engine
        (transports, then v1/v2c/v3 applications and command responders),
        starts a parent watchdog and runs the transport dispatcher until the
        process is killed. On any exit path the process SIGKILLs itself so
        the parent detects the death and can spawn a fresh agent.
        """

        transportDispatcher = None
        try:
            # Initialize the engine
            self.tokens_start.get()
            if self.active is not None and self.active.lower() == "false":

                # Changes the process name shown by ps for instance
                setProcTitle("agentcluster agent  [active: False]  [name: %s]" % self.name)

                logger.info('Agent "%s": inactive', self.name)
                # Generates a deadlock to enter in sleep mode
                # Only an external signal can break this deadlock
                self.tokens_start.task_done()
                queue = JoinableQueue()
                queue.put(object())
                queue.join()

            # Changes the process name shown by ps for instance
            setProcTitle("agentcluster agent  [active: True ]  [name: %s]" % self.name)

            logger.info('Agent "%s": run', self.name)
            logger.debug('EngineID="%s"', self.engineID)

            # Optional explicit engine ID, hex-encoded in the configuration;
            # fall back to a pysnmp-generated one when absent or malformed.
            engineID_bin = None
            if self.engineID != None:
                try:
                    engineID_bin = self.engineID.decode("hex")
                except Exception:
                    logger.warn(
                        "Cannot convert configured engine ID to byte array, engine ID ignored: %s", self.engineID
                    )
                    logger.debug("", exc_info=True)
            else:
                logger.debug("No context engineID specified, let pysnmp generate one")

            snmpEngine = engine.SnmpEngine(snmpEngineID=engineID_bin)

            logger.debug('Agent "%s": Configure transport layer', self.name)
            # Each listen protocol may carry a single endpoint or a list.
            for protocol, params in self.listen.__dict__.items():
                if type(params) is list:
                    for param in params:
                        (domain, socket) = self.socketHelper.openSocket(protocol, param.encode("ascii"))
                        config.addSocketTransport(snmpEngine, domain, socket)
                else:
                    (domain, socket) = self.socketHelper.openSocket(protocol, params.encode("ascii"))
                    config.addSocketTransport(snmpEngine, domain, socket)

            logger.debug('Agent "%s": Configure application layer', self.name)
            snmpContext = context.SnmpContext(snmpEngine)
            if self.snmpv1 is not None:
                SnmpConfHelperV1().configure(snmpEngine, snmpContext, self.snmpv1)
            if self.snmpv2c is not None:
                SnmpConfHelperV2().configure(snmpEngine, snmpContext, self.snmpv2c)
            if self.snmpv3 is not None:
                SnmpConfHelperV3().configure(snmpEngine, snmpContext, self.snmpv3)

            # Respond to GET / SET / GETNEXT / GETBULK requests.
            cmdrsp.GetCommandResponder(snmpEngine, snmpContext)
            cmdrsp.SetCommandResponder(snmpEngine, snmpContext)
            cmdrsp.NextCommandResponder(snmpEngine, snmpContext)
            cmdrsp.BulkCommandResponder(snmpEngine, snmpContext)

            logger.debug('Agent "%s": Configured', self.name)
            self.tokens_start.task_done()

            logger.debug("Starting parent and database watchdog")
            self.monitor = Watchdog(self.parent_pid, self.monitoring_period)
            self.monitor.start()

            # Job will never end unless killed
            logger.debug('Agent "%s": Running dispatcher', self.name)
            transportDispatcher = snmpEngine.transportDispatcher
            transportDispatcher.jobStarted(1)
            transportDispatcher.runDispatcher()

        except KeyboardInterrupt:
            logger.debug('Agent "%s": interrupted', self.name)
        except Exception:
            logger.error("Unexpected exception catched in agent: %s", sys.exc_info()[1])
            logger.error("", exc_info=True)
        finally:
            if transportDispatcher != None:
                transportDispatcher.closeDispatcher()
            logger.info('Agent "%s": end', self.name)
            logging.shutdown()
            try:
                # Issue #3: Python 2.7.6 releases the parent process if children is killed
                # not Python 2.6.6 so we must still release the token.
                self.tokens_start.task_done()
            except:
                pass
            # Issue #3: This agent is no longer usable so commit suicide to be sure
            # This process won't become a zombie and that parent will start a new agent
            os.kill(os.getpid(), signal.SIGKILL)