Example #1
0
def generate_requests(hosts, jolokia_port, jolokia_prefix, jolokia_user,
                      jolokia_password):
    """Yield (host, future) pairs that fetch the under replicated
    partition count from each of the specified hosts.

    :param hosts: list of brokers ip addresses
    :type hosts: list of strings
    :param jolokia_port: HTTP port for Jolokia
    :type jolokia_port: integer
    :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
    :type jolokia_prefix: string
    :param jolokia_user: Username for Jolokia, if needed
    :type jolokia_user: string
    :param jolokia_password: Password for Jolokia, if needed
    :type jolokia_password: string
    :returns: generator of requests
    """
    session = FuturesSession()
    # Only attach basic auth when both credentials are provided.
    if jolokia_user and jolokia_password:
        session.auth = (jolokia_user, jolokia_password)
    url_template = "http://{host}:{port}/{prefix}/read/{key}"
    for broker in hosts:
        request_url = url_template.format(
            host=broker,
            port=jolokia_port,
            prefix=jolokia_prefix,
            key=UNDER_REPL_KEY,
        )
        yield broker, session.get(request_url)
Example #2
0
def home(request, album_key):
    """Serve the sentiment analysis for an album as JSON, caching by album key.

    On a cache miss, fetches the album's comments, runs sentiment requests
    concurrently, aggregates the results, and stores the serialized JSON
    back in the cache before returning it.
    """
    payload = cache.get(album_key)

    if payload is None:
        session = FuturesSession(max_workers=5)
        session.auth = AlchemyApiAuth(settings.ALCHEMYAPI_KEY)

        source_item, reviews, comment_by_comment_key = get_rdio_comments(album_key)
        # Kick off one async sentiment request per comment.
        futures = [
            start_request(session, comment_key, comment_text)
            for comment_key, comment_text in comment_by_comment_key.iteritems()
        ]

        sentiment_by_comment_key = complete_requests(futures)
        total_sentiment, per_item_sentiment = aggregate_sentiment(
            reviews, sentiment_by_comment_key)

        result = {
            'item': source_item,
            'total_sentiment': total_sentiment,
            'per_item_sentiment': per_item_sentiment,
            'sentiment_by_comment_key': sentiment_by_comment_key,
            'comment_by_comment_key': comment_by_comment_key,
        }

        payload = json.dumps(result, indent=2)
        cache.set(album_key, payload)

    return http.HttpResponse(payload,
                             content_type='application/json')
Example #3
0
    def _establish_session(self, consumer_key, consumer_secret, access_token, access_token_secret, max_workers, user_agent):
        """Create a FuturesSession authenticated with OAuth1.

        :param consumer_key: OAuth1 consumer (client) key
        :param consumer_secret: OAuth1 consumer (client) secret
        :param access_token: OAuth1 resource owner key
        :param access_token_secret: OAuth1 resource owner secret
        :param max_workers: thread pool size for the futures session
        :param user_agent: optional User-Agent header value
        :returns: configured FuturesSession
        """
        session = FuturesSession(max_workers=max_workers)
        session.auth = OAuth1(
            client_key=consumer_key,
            client_secret=consumer_secret,
            resource_owner_key=access_token,
            resource_owner_secret=access_token_secret,
        )
        # Only set a User-Agent when one was supplied.
        if user_agent:
            session.headers.update({'User-Agent': user_agent})
        logger.debug('Oauth1 session initialized.')

        return session
Example #4
0
    def _establish_session(self, consumer_key, consumer_secret, access_token,
                           access_token_secret, max_workers, user_agent):
        """Build and return an OAuth1-authenticated FuturesSession.

        :param consumer_key: OAuth1 consumer (client) key
        :param consumer_secret: OAuth1 consumer (client) secret
        :param access_token: OAuth1 resource owner key
        :param access_token_secret: OAuth1 resource owner secret
        :param max_workers: thread pool size for the futures session
        :param user_agent: optional User-Agent header value
        :returns: configured FuturesSession
        """
        extra_headers = {'User-Agent': user_agent} if user_agent else {}

        auth_handler = OAuth1(client_key=consumer_key,
                              client_secret=consumer_secret,
                              resource_owner_key=access_token,
                              resource_owner_secret=access_token_secret)

        session = FuturesSession(max_workers=max_workers)
        session.auth = auth_handler
        session.headers.update(extra_headers)
        logger.debug('Oauth1 session initialized.')

        return session
Example #5
0
    def get_all_issues(self, project_key, total, max_results):
        """Fetch all project issues, paging through results concurrently.

        :param project_key: JIRA project key used in the JQL filter
        :param total: total number of issues to fetch
        :param max_results: page size for each search request
        :returns: list of dicts with keys 'key', 'summary', 'description',
            'type', 'status' and 'resolved'
        """
        log.debug("%s issues to fetch" % total)

        # Setup a session for concurrent fetching
        s = FuturesSession(executor=ThreadPoolExecutor(max_workers=4))
        s.auth = (self.username, self.password)
        s.params = {
            'jql': "project=%s" % project_key,
            'fields': 'summary,description,issuetype,status,resolution',
            'maxResults': max_results
        }
        s.headers = {'Content-Type': 'application/json'}

        def parse_json_cb(sess, resp):
            # Build a real list (not map()): under Python 3 map() returns a
            # lazy iterator, which would break the list concatenation with
            # '+' in get_issues() below.
            resp.data = [
                {
                    'key': item['key'],
                    'summary': item['fields']['summary'],
                    'description': item['fields']['description'],
                    'type': item['fields']['issuetype']['name'],
                    'status': item['fields']['status']['name'],
                    'resolved': bool(item['fields']['resolution'])
                }
                for item in resp.json()['issues']
            ]

        def get_issues(start_at=0):
            # Fire the request for this page, then recurse to queue the next
            # page BEFORE blocking on the result, so pages download in parallel.
            future = s.get("%s/search" % self.url,
                           params={'startAt': start_at},
                           background_callback=parse_json_cb)
            next_at = start_at + max_results
            log.debug("... %s/%s" % (min(next_at, total), total))
            if next_at < total:
                data = get_issues(next_at)
            else:
                return future.result().data
            return future.result().data + data

        issues = get_issues()
        return issues
Example #6
0
    def get_all_issues(self, project_key, total, max_results):
        """Fetch all project issues, paging through results concurrently.

        :param project_key: JIRA project key used in the JQL filter
        :param total: total number of issues to fetch
        :param max_results: page size for each search request
        :returns: list of dicts with keys 'key', 'summary', 'description',
            'type', 'status' and 'resolved'
        """
        log.debug("%s issues to fetch" % total)

        # Setup a session for concurrent fetching
        s = FuturesSession(executor=ThreadPoolExecutor(max_workers=4))
        s.auth = (self.username, self.password)
        s.params = {
            'jql': "project=%s" % project_key,
            'fields': 'summary,description,issuetype,status,resolution',
            'maxResults': max_results
            }
        s.headers = {'Content-Type': 'application/json'}

        def parse_json_cb(sess, resp):
            # Build a real list (not map()): under Python 3 map() returns a
            # lazy iterator, which would break the list concatenation with
            # '+' in get_issues() below.
            resp.data = [{
                'key': item['key'],
                'summary': item['fields']['summary'],
                'description': item['fields']['description'],
                'type': item['fields']['issuetype']['name'],
                'status': item['fields']['status']['name'],
                'resolved': bool(item['fields']['resolution'])
                } for item in resp.json()['issues']]

        def get_issues(start_at=0):
            # Fire the request for this page, then recurse to queue the next
            # page BEFORE blocking on the result, so pages download in parallel.
            future = s.get(
                "%s/search" % self.url,
                params={'startAt': start_at},
                background_callback=parse_json_cb)
            next_at = start_at + max_results
            log.debug("... %s/%s" % (min(next_at, total), total))
            if next_at < total:
                data = get_issues(next_at)
            else:
                return future.result().data
            return future.result().data + data

        issues = get_issues()
        return issues
Example #7
0
def start_detecting(options):
    """Run the main detection loop.

    Continuously reads audio chunks, computes an FFT, and watches the
    configured signal frequency bins for beep sequences. When a valid
    sequence is detected, asynchronously notifies the PWM server at
    ``options.ip``/``options.port`` with the corresponding duty value.

    :param options: parsed command-line options; this function reads ip,
        port, sensitivity, bin1..bin4, scale1..scale4, timeout, user and
        password from it.
    """
    server_ip = options.ip
    sensitivity = options.sensitivity
    # FFT bin indices of the four signal frequencies and the per-bin scale
    # factors applied when comparing a bin's response to the sensitivity
    # threshold.
    sig_bins = [options.bin1, options.bin2, options.bin3, options.bin4]
    sig_scales = [
        options.scale1, options.scale2, options.scale3, options.scale4
    ]

    # To avoid having to do time system calls which may be expensive and return non-monotonic
    # values, all timings rely on the number of audio chunks processed, because the duration of
    # one chunk is a known fixed time.
    chunk_duration = float(NUM_SAMPLES) / SAMPLING_RATE
    # Number of chunks to wait before checking on an async request's result.
    request_countdown = int(round(float(options.timeout + 1) / chunk_duration))

    # HTTP requests to PWM server are done asynchronously. We really don't want to risk a buffer
    # overflow due to a slow response. Only when we're sure the request will be either done or
    # has timed out, check on it to print an error message if it failed.
    session = FuturesSession(max_workers=4)
    if options.user and options.password:
        session.auth = HTTPDigestAuth(options.user, options.password)

    # Deques are very efficient for a FIFO like this.
    # futures holds pending requests; future_countdowns holds, per future,
    # the number of chunks left before we inspect its result.
    futures = deque()
    future_countdowns = deque()

    # This will probably barf many errors, you could clean up your asound.conf to get rid of
    # some of them, but they are harmless anyway.
    audio = pyaudio.PyAudio()

    # Perform a first request to the PWM server. This has three functions:
    # 1, warn early if the server isn't running;
    # 2, ensure the server is in enabled state;
    # 3, ensure these bits of code are already cached when we need to do our first real request.
    attempts_left = 4
    while server_ip and attempts_left:
        future = session.get('http://{}:{}/api/enable?basic=1'.format(
            server_ip, options.port))
        try:
            req = future.result()
            if req.status_code == 200:
                LOG.info("OK: Successfully enabled PWM server.")
                break
            else:
                LOG.warning("Test request to PWM server failed with status %s",
                            req.status_code)
        except requests.ConnectionError as err:
            LOG.warning("The PWM server may be down? %s", err)
        attempts_left -= 1
        LOG.warning("Attempts left: %d", attempts_left)
        if attempts_left:
            sleep(2)
        else:
            LOG.error("PWM server could not be contacted! Continuing anyway.")

    LOG.info("Beep sequence detector started.")

    # Also keep track of the halved signal frequencies. If there is a response at those
    # frequencies that is at least nearly as strong as the signal frequency, then the signal
    # frequency is probably a harmonic from a beep played at a lower frequency.
    all_bins = sig_bins[:] + [i // 2 for i in sig_bins]
    all_bin_indices = list(range(0, len(all_bins)))
    sig_bin_indices = list(range(0, len(sig_bins)))
    empty_bins = zeros(2 * len(all_bins))
    last_bins = empty_bins[:]  # Ensure to copy by value, not reference

    detections = DetectionState()
    last_peak = None
    peak_count = 0
    # Last duty value sent to the server, kept so a failed request can be
    # retried with the same value.
    current_duty = None
    in_stream = open_input_stream(audio, options)

    while True:
        try:
            # Wait until we have enough samples to work with. Sleep at most 1/4 of the duration
            # of one audio chunk, otherwise input overflow risk increases.
            while in_stream.get_read_available() < NUM_SAMPLES:
                sleep(0.005)
        except IOError as err:
            # Most likely an overflow despite my attempts to avoid them. Only try to reopen the
            # stream once, because it could also be the sound device having been unplugged or
            # some other fatal error, and we don't want to hog the CPU with futile attempts to
            # recover in such cases.
            LOG.error(
                "Failed to probe stream: %s. Now retrying once to reopen stream...",
                err)
            in_stream = open_input_stream(audio, options)
            while in_stream.get_read_available() < NUM_SAMPLES:
                sleep(0.005)

        try:
            audio_data = frombuffer(in_stream.read(NUM_SAMPLES,
                                                   exception_on_overflow=True),
                                    dtype=short)
        except IOError as err:
            # I could restart the stream here, but the above except catcher already does it anyway.
            LOG.error("Could not read audio data: %s", err)
            continue

        # Each data point is a signed 16 bit number, so divide by 2^15 to get more reasonable FFT
        # values. Because our input is real (no imaginary component), we can ditch the redundant
        # second half of the FFT.
        intensity = abs(fft(audio_data / 32768.0)[:NUM_SAMPLES // 2])
        detections.time_increment()

        # Check any previously created requests to the PWM server.
        if future_countdowns:
            future_countdowns = deque([x - 1 for x in future_countdowns])
            # Handle at most one request per loop, CPU cycles are precioussss
            if future_countdowns[0] < 1:
                future_countdowns.popleft()
                future = futures.popleft()
                success = False
                try:
                    req = future.result()
                    if req.status_code != 200:
                        LOG.error(
                            "Request to PWM server failed with status %d",
                            req.status_code)
                    else:
                        success = True
                except requests.ConnectionError as err:
                    LOG.error("Could not connect to PWM server: %s", err)
                if not success and not future_countdowns and attempts_left:
                    # The request failed and no newer ones are queued. Because things are handled
                    # in a sequential manner, there is no risk of a race condition by retrying.
                    LOG.info("Retrying the request, %d attempts left",
                             attempts_left)
                    future_countdowns.append(request_countdown)
                    make_duty_request(futures, session, options, current_duty)
                    attempts_left -= 1

        if DETECT_CONTINUOUS:
            # Find the peak frequency. If the same one occurs loud enough for long enough,
            # assume the buzzer is playing a song and we should reset detection state.
            peak = intensity[TONE_BIN_LOWER:TONE_BIN_UPPER].argmax(
            ) + TONE_BIN_LOWER
            if intensity[peak] > sensitivity:
                if peak == last_peak:
                    peak_count += 1
                    if peak_count > 2:
                        LOG.debug(
                            "Reset because of continuous tone (bin %d, %dx)",
                            peak, peak_count)
                        detections.reset()
                        continue
                else:
                    last_peak = peak
                    peak_count = 1
            else:
                last_peak = None
                peak_count = 0

        # See if one of our signal frequencies occurred. Sum responses over current and previous
        # windows, to get a more consistent intensity value even when beep spans two windows.
        current_bins = [intensity[all_bins[i]] for i in all_bin_indices]
        total_bins = list(map(add, last_bins, current_bins))
        signals = [
            i for i in sig_bin_indices
            if total_bins[i] * sig_scales[i] > sensitivity
        ]
        last_bins = current_bins[:]

        if len(signals) != 1:  # either 'silence' or multiple signals
            # If multiple occurred simultaneously, assume it is loud noise and treat as silence.
            # This means that unless you're using an electrical connection instead of a
            # microphone, a loud clap at the exact moment a beep is played, may cause its
            # detection to be missed. This seems lower risk than allowing any loud noise to
            # appear to be a valid signal.
            if len(signals) > 1:
                LOG.debug("Ignoring %d simultaneous signals", len(signals))
            # Check if we have a valid sequence
            duty = detections.check_silence()
            if duty is None:  # Nothing interesting happened
                continue
            last_bins = empty_bins[:]
            if duty is not False and server_ip:
                current_duty = duty
                future_countdowns.append(request_countdown)
                make_duty_request(futures, session, options, duty)
                # Reset the retry budget for this fresh request.
                attempts_left = 2
        else:  # 1 signal
            # Ratio of the half-frequency bin's response to the signal bin's:
            # a high ratio means the "signal" is likely a harmonic of a lower
            # beep (see the all_bins comment above).
            harmonic_ratio = total_bins[NUM_SIGNALS +
                                        signals[0]] / total_bins[signals[0]]
            if harmonic_ratio > HARMONIC_FACTOR:
                LOG.debug(
                    "Reset because apparent signal %d is actually a harmonic (%.1f)",
                    signals[0], harmonic_ratio)
                detections.reset()
                continue
            if not detections.check_signal(signals[0]):
                # NOTE(review): clearing the window sums here presumably avoids
                # double-counting a rejected signal in the next chunk — confirm
                # against DetectionState.check_signal semantics.
                last_bins = empty_bins[:]
Example #8
0
 def _get_session(self):
     """Return a FuturesSession using HTTP Basic auth.

     The API key serves as both username and password; proxies are
     attached only when an 'http' or 'https' proxy is configured.
     """
     sess = FuturesSession()
     sess.auth = HTTPBasicAuth(self.api_key, self.api_key)
     has_proxy = self.proxies and (
         self.proxies.get('http') or self.proxies.get('https'))
     if has_proxy:
         sess.proxies = self.proxies
     return sess