Example #1
 def __exit__(self, type, value, traceback):
     if self.expected == 0:
         with Timeout(.5, None):
             self.event.wait()
         assert(len(self.received) == 0)
         return
     with Timeout(1):
         self.event.wait()
         self.event = Event()
Example #2
    def test_nested_timeout(self):
        with Timeout(DELAY, False):
            with Timeout(DELAY*2, False):
                sleep(DELAY*3)
            raise AssertionError('should not get there')

        with Timeout(DELAY) as t1:
            with Timeout(DELAY*2) as t2:
                try:
                    sleep(DELAY*3)
                except Timeout as ex:
                    assert ex is t1, (ex, t1)
                assert not t1.pending, t1
                assert t2.pending, t2
            assert not t2.pending, t2
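The nested test above relies on two behaviors of eventlet/gevent-style timeouts: a timer created with exception=False expires silently instead of raising, and a bare Timeout(DELAY) raises the timeout instance itself, so an identity check tells apart whose timer fired. A minimal standalone sketch, assuming eventlet:

from eventlet import Timeout, sleep

DELAY = 0.05

# Silent mode: the shorter, outer timer cuts the sleep short, and because it was
# created with exception=False the with-block simply ends without raising.
with Timeout(DELAY, False):
    with Timeout(DELAY * 2, False):
        sleep(DELAY * 3)

# Default mode: the Timeout instance itself is raised, so identity distinguishes timers.
try:
    with Timeout(DELAY) as t:
        sleep(DELAY * 2)
except Timeout as ex:
    assert ex is t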
Example #3
    def get_groups(self, env, token, memcache_client=None):
        """
        Get groups for the given token.

        If memcache_client is set, token credentials will be cached
        appropriately.

        With a cache miss, or no memcache_client, the configured external
        authentication server will be queried for the group information.

        :param token: Token to validate and return a group string for.
        :param memcache_client: Memcached client to use for caching token
                                credentials; None if no caching is desired.
        :returns: None if the token is invalid or a string containing a comma
                  separated list of groups the authenticated user is a member
                  of. The first group in the list is also considered a unique
                  identifier for that user.
        """
        groups = None
        key = '%s/token/%s' % (self.reseller_prefix, token)
        cached_auth_data = memcache_client and memcache_client.get(key)
        if cached_auth_data:
            start, expiration, groups = cached_auth_data
            if time() - start > expiration:
                groups = None

        headers = {}
        if env.get('HTTP_AUTHORIZATION'):
            groups = None
            headers["Authorization"] = env.get('HTTP_AUTHORIZATION')

        if not groups:
            with Timeout(self.timeout):
                conn = http_connect(self.auth_host,
                                    self.auth_port,
                                    'GET',
                                    '%stoken/%s' % (self.auth_prefix, token),
                                    headers,
                                    ssl=self.ssl)

                resp = conn.getresponse()
                resp.read()
                conn.close()
            if resp.status // 100 != 2:
                return None
            expiration = float(resp.getheader('x-auth-ttl'))
            groups = resp.getheader('x-auth-groups')
            if memcache_client:
                memcache_client.set(key, (time(), expiration, groups),
                                    timeout=expiration)

        if env.get('HTTP_AUTHORIZATION'):
            account, user, sign = \
                env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':')
            cfaccount = resp.getheader('x-auth-account-suffix')
            path = env['PATH_INFO']
            env['PATH_INFO'] = \
                path.replace("%s:%s" % (account, user), cfaccount, 1)

        return groups
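The caching scheme above stores a (start, expiration, groups) tuple and treats the entry as stale once more than expiration seconds have elapsed since start, forcing a fresh query to the auth server. A small sketch of that freshness check with a hypothetical get_cached helper (names are illustrative, not from the source):

from time import time

def get_cached(memcache_client, key):
    # Return the cached value only while it is still fresh; otherwise None,
    # which tells the caller to query the external auth server again.
    entry = memcache_client and memcache_client.get(key)
    if not entry:
        return None
    start, expiration, value = entry
    if time() - start > expiration:
        return None
    return value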
Example #4
    def create_cluster(self, context, cluster_id):
        LOG.debug("Begin create_cluster for id: %s." % cluster_id)

        def _create_cluster():

            # Fetch instances by cluster_id against instances table.
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]

            # Wait for cluster members to get to cluster-ready status.
            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            LOG.debug("All members ready, proceeding for cluster setup.")
            instances = [
                Instance.load(context, instance_id)
                for instance_id in instance_ids
            ]

            member_ips = [self.get_ip(instance) for instance in instances]
            guests = [self.get_guest(instance) for instance in instances]

            # Users to be configured for password-less SSH.
            authorized_users_without_password = ['root', 'dbadmin']

            # Configuring password-less SSH for cluster members.
            # Strategy for setting up SSH:
            # get public keys for user from member-instances in cluster,
            # combine them, finally push it back to all instances,
            # and member instances add them to authorized keys.
            LOG.debug("Configuring password-less SSH on cluster members.")
            try:
                for user in authorized_users_without_password:
                    pub_key = [guest.get_public_keys(user) for guest in guests]
                    for guest in guests:
                        guest.authorize_public_keys(user, pub_key)

                LOG.debug("Installing cluster with members: %s." % member_ips)
                guests[0].install_cluster(member_ips)

                LOG.debug("Finalizing cluster configuration.")
                for guest in guests:
                    guest.cluster_complete()
            except Exception:
                LOG.exception(_("Error creating cluster."))
                self.update_statuses_on_failure(cluster_id)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _create_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for building cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("End create_cluster for id: %s." % cluster_id)
Example #5
 def request_to_ks(self, req, servers, port):
     """ Routing multiple keystone servers.
     """
     succ_resps = []
     fail_resps = []
     auth_tokens = self._split_auth_token(req, servers)
     bodies = self._split_body(req, servers)
     for site, token, body in zip(servers, auth_tokens, bodies):
         for node in site:
             parsed = urlparse(self._combinate_ks_url(node, port, req))
             connector = HTTPSConnection if parsed.scheme == 'https' else HTTPConnection
             try:
                 with ConnectionTimeout(self.conn_timeout):
                     (host, port) = parsed.netloc.split(':')
                     headers = req.headers
                     if 'Host' in req.headers:
                         headers['Host'] = host + ':' + str(port)
                     if token:
                         headers['X-Auth-Token'] = token
                     if 'Content-Length' in req.headers:
                         del headers['Content-Length']
                     conn = connector(host, port)
                     conn.request(req.method, parsed.path, body, headers)
                     with Timeout(self.timeout):
                         resp = conn.getresponse()
                         if resp.status >= 200 and resp.status <= 300:
                             succ_resps.append(resp)
                             break
                         else:
                             fail_resps.append(resp)
             except ValueError:
                 fail_resps.append(HTTPPreconditionFailed(request=req))
             except (Exception, TimeoutError):
                 fail_resps.append(HTTPServiceUnavailable(request=req))
Example #6
def execute_with_timeout(*args, **kwargs):
    time = kwargs.pop('timeout', 30)

    def cb_timeout():
        msg = (_("Time out after waiting"
                 " %(time)s seconds when running proc: %(args)s"
                 " %(kwargs)s") % {
                     'time': time,
                     'args': args,
                     'kwargs': kwargs
                 })
        LOG.error(msg)
        raise exception.ProcessExecutionError(msg)

    timeout = Timeout(time)
    try:
        return execute(*args, **kwargs)
    except Timeout as t:
        if t is not timeout:
            LOG.error("Timeout reached but not from our timeout. This is bad!")
            raise
        else:
            msg = (_("Time out after waiting "
                     "%(time)s seconds when running proc: %(args)s"
                     " %(kwargs)s") % {
                         'time': time,
                         'args': args,
                         'kwargs': kwargs
                     })
            LOG.error(msg)
            raise exception.ProcessExecutionError(msg)
    finally:
        timeout.cancel()
Example #7
 def __call__(self, *args, **kwargs):
     args_hash = json.dumps((args, kwargs), sort_keys=True)
     timeout = None
     incache, cached_response = self.get_from_cache(args_hash)
     if self.use_cache_first and incache:
         return cached_response
     if self.failover_timeout and incache:
         # we have a value, let's set a timeout and use our cache if service
         # isn't responsive
         timeout = Timeout(self.failover_timeout)
     try:
         reply = super(CachedMethodProxy, self).__call__(*args, **kwargs)
     except (Exception, Timeout) as e:
         error = 'timeout' if isinstance(e, Timeout) else 'error'
         _log.warn('%s when getting value for %s. using cache', error, self)
         if incache:
             _log.warn('response found in cache. using cache')
             return cached_response
         else:
             _log.error('request not in cache, re-raising %s', self.cache)
             raise e
     finally:
         if timeout:
             timeout.cancel()
     self.cache[args_hash] = (reply, )
     return reply
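The proxy above only arms a Timeout when a cached response exists, so a slow backend degrades to the last known good value instead of blocking indefinitely. A compact sketch of that failover decision, assuming eventlet and hypothetical call_backend/cache names:

from eventlet import Timeout

def call_with_failover(call_backend, cache, key, failover_timeout):
    incache = key in cache
    timeout = None
    if failover_timeout and incache:
        timeout = Timeout(failover_timeout)
    try:
        result = call_backend()
    except (Exception, Timeout):
        if incache:
            return cache[key]  # fall back to the cached value
        raise
    finally:
        if timeout:
            timeout.cancel()
    cache[key] = result
    return result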
Example #8
    def rengine_side(self, appid, token, uri):
        """ Handle rengine (client) GET requests """
        if not self.rengine_authorization_ok(appid, token):
            LOGGER.info('Rengine content request authorization fails')
            abort(401, 'Authorization failed')

        evt = Event()
        request_id = str(uuid4())
        self.request_id_events[request_id] = evt

        headers = [
            "%s: %s" % (header, val)
            for (header, val) in request.headers.items()
        ]
        packet = ScpPacket.make_sfkcontent(uri, request_id, headers)
        try:
            self._send(packet, appid)
        except Exception as e:
            abort(500, str(e))

        LOGGER.debug("uri %s expected" % uri)
        timeout = Timeout(TIMEOUT)
        try:
            resp = evt.wait()
        except Timeout:
            del self.request_id_events[request_id]
            abort(504, 'Gateway Timeout')
        finally:
            timeout.cancel()

        LOGGER.debug("uri %s got" % uri)

        return resp
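Example #8 parks each request on its own Event and bounds the wait with a Timeout that is always cancelled afterwards; the timeout path maps to a 504 response. A minimal sketch of that wait, assuming eventlet:

from eventlet import Timeout

def wait_for_reply(evt, seconds):
    # evt is an eventlet.event.Event completed elsewhere via evt.send(response).
    timeout = Timeout(seconds)
    try:
        return evt.wait()
    except Timeout:
        return None  # the caller turns this into a 504 Gateway Timeout
    finally:
        timeout.cancel()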
Example #9
    def piper(self, in_sock, out_sock, out_addr, onkill):
        "Worker thread for data reading"
        try:
            timeout = Timeout(self.transmission_timeout_seconds)
            try:
                while True:
                    written = in_sock.recv(32768)
                    if not written:
                        try:
                            out_sock.shutdown(socket.SHUT_WR)
                        except socket.error:
                            self.threads[onkill].kill()
                        break
                    try:
                        out_sock.sendall(written)
                    except socket.error:
                        pass
                    self.data_handled += len(written)
            finally:
                timeout.cancel()
        except greenlet.GreenletExit:
            return
        except Timeout:
            # This only prevents the connection from being closed without any data or
            # status code from mantrid when nothing was received from the backend.
            # When that happens, nginx reports 'upstream prematurely closed connection'
            # and returns 500, and we want our custom error page so we know it happened.

            if onkill == "stoc" and self.data_handled == 0:
                out_sock.sendall(
                    "HTTP/1.0 594 Backend timeout\r\nConnection: close\r\nContent-length: 0\r\n\r\n"
                )
            logging.warn("Timeout serving request to backend %s of %s",
                         self.backend, self.host)
            return
Example #10
    def grow_cluster(self, context, cluster_id, new_instance_ids):

        def _grow_cluster():
            LOG.debug("begin grow_cluster for Vertica cluster %s", cluster_id)

            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()

            instance_ids = [db_instance.id for db_instance in db_instances]

            # Wait for new cluster members to get to cluster-ready status.
            if not self._all_instances_ready(new_instance_ids, cluster_id):
                return

            new_insts = [Instance.load(context, instance_id)
                         for instance_id in new_instance_ids]

            existing_instances = [Instance.load(context, instance_id)
                                  for instance_id
                                  in instance_ids
                                  if instance_id not in new_instance_ids]

            existing_guests = [self.get_guest(i) for i in existing_instances]
            new_guests = [self.get_guest(i) for i in new_insts]
            all_guests = new_guests + existing_guests

            authorized_users_without_password = ['root', 'dbadmin']
            new_ips = [self.get_ip(instance) for instance in new_insts]

            for user in authorized_users_without_password:
                pub_key = [guest.get_public_keys(user) for guest in all_guests]
                for guest in all_guests:
                    guest.authorize_public_keys(user, pub_key)

            for db_instance in db_instances:
                if db_instance['type'] == 'master':
                    LOG.debug("Found 'master' instance, calling grow on guest")
                    master_instance = Instance.load(context,
                                                    db_instance.id)
                    self.get_guest(master_instance).grow_cluster(new_ips)
                    break

            for guest in new_guests:
                guest.cluster_complete()

        timeout = Timeout(CONF.cluster_usage_timeout)

        try:
            _grow_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for growing cluster."))
            self.update_statuses_on_failure(cluster_id)
        except Exception:
            LOG.exception(_("Error growing cluster %s."), cluster_id)
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()
Example #11
 def spawn_accepts():
     events = []
     for _junk in range(2):
         with Timeout(3):
             sock, addr = bindsock.accept()
             events.append(spawn(accept, sock, addr))
     return events
Example #12
def execute_with_timeout(*args, **kwargs):
    time = kwargs.pop('timeout', 30)
    log_output_on_error = kwargs.pop('log_output_on_error', False)

    timeout = Timeout(time)
    try:
        return execute(*args, **kwargs)
    except exception.ProcessExecutionError as e:
        if log_output_on_error:
            LOG.error(
                _("Command '%(cmd)s' failed. %(description)s "
                  "Exit code: %(exit_code)s\nstderr: %(stderr)s\n"
                  "stdout: %(stdout)s") %
                {'cmd': e.cmd, 'description': e.description or '',
                 'exit_code': e.exit_code, 'stderr': e.stderr,
                 'stdout': e.stdout})
        raise
    except Timeout as t:
        if t is not timeout:
            LOG.error(_("Got a timeout but not the one expected."))
            raise
        else:
            msg = (_("Time out after waiting "
                     "%(time)s seconds when running proc: %(args)s"
                     " %(kwargs)s.") % {'time': time, 'args': args,
                                        'kwargs': kwargs})
            LOG.error(msg)
            raise exception.ProcessExecutionError(msg)
    finally:
        timeout.cancel()
Example #13
 def test_raising_exception_class(self):
     # Providing classes instead of values should be possible too:
     try:
         with Timeout(DELAY, ValueError):
             sleep(DELAY * 2)
     except ValueError:
         pass
Example #14
 def _connect_put_node(self, nodes, part, path, headers,
                       logger_thread_locals):
     """Method for a file PUT connect"""
     self.app.logger.thread_locals = logger_thread_locals
     for node in nodes:
         try:
             start_time = time.time()
             with ConnectionTimeout(self.app.conn_timeout):
                 conn = http_connect(
                     node['ip'], node['port'], node['device'], part, 'PUT',
                     path, headers)
             self.app.set_node_timing(node, time.time() - start_time)
             with Timeout(self.app.node_timeout):
                 resp = conn.getexpect()
             if resp.status == HTTP_CONTINUE:
                 conn.resp = None
                 conn.node = node
                 return conn
             elif is_success(resp.status):
                 conn.resp = resp
                 conn.node = node
                 return conn
             elif headers['If-None-Match'] is not None and \
                     resp.status == HTTP_PRECONDITION_FAILED:
                 conn.resp = resp
                 conn.node = node
                 return conn
             elif resp.status == HTTP_INSUFFICIENT_STORAGE:
                 self.app.error_limit(node, _('ERROR Insufficient Storage'))
         except (Exception, Timeout):
             self.app.exception_occurred(
                 node, _('Object'),
                 _('Expect: 100-continue on %s') % path)
Example #15
 def _make_request(self, nodes, part, method, path, headers, query,
                   logger_thread_locals):
     self.app.logger.thread_locals = logger_thread_locals
     for node in nodes:
         try:
             start_node_timing = time.time()
             with ConnectionTimeout(self.app.conn_timeout):
                 conn = http_connect(node['ip'],
                                     node['port'],
                                     node['device'],
                                     part,
                                     method,
                                     path,
                                     headers=headers,
                                     query_string=query)
                 conn.node = node
             self.app.set_node_timing(node, time.time() - start_node_timing)
             with Timeout(self.app.node_timeout):
                 resp = conn.getresponse()
                 if not is_informational(resp.status) and \
                         not is_server_error(resp.status):
                     return resp.status, resp.reason, resp.read()
                 elif resp.status == HTTP_INSUFFICIENT_STORAGE:
                     self.error_limit(node, _('ERROR Insufficient Storage'))
         except (Exception, Timeout):
             self.exception_occurred(
                 node, self.server_type,
                 _('Trying to %(method)s %(path)s') % {
                     'method': method,
                     'path': path
                 })
Example #16
def fetchBitcoinChartsData():
    global ba

    if 'bitcoincharts' not in ba.api_parsers.API_QUERY_CACHE:
        ba.api_parsers.API_QUERY_CACHE['bitcoincharts'] = {
            'last_call_timestamp': 0,
            'result': None,
            'call_fail_count': 0,
        }

    current_timestamp = int(time.time())
    if (ba.api_parsers.API_QUERY_CACHE['bitcoincharts']['last_call_timestamp']
            + ba.api_parsers.API_QUERY_FREQUENCY['bitcoincharts'] >
            current_timestamp):
        result = ba.api_parsers.API_QUERY_CACHE['bitcoincharts']['result']
    else:
        with Timeout(API_CALL_TIMEOUT_THRESHOLD, CallTimeoutException):
            response = urllib2.urlopen(
                urllib2.Request(url=BITCOIN_CHARTS_API_URL,
                                headers=API_REQUEST_HEADERS)).read()
            result = json.loads(response)

        ba.api_parsers.API_QUERY_CACHE['bitcoincharts'] = {
            'last_call_timestamp': current_timestamp,
            'result': result,
            'call_fail_count': 0,
        }

    return result
Example #17
 def _connect_put_node(self,
                       host,
                       port,
                       method,
                       path,
                       headers,
                       query_string,
                       ssl=False):
     try:
         with ConnectionTimeout(self.conn_timeout):
             conn = http_connect_raw(host,
                                     port,
                                     method,
                                     path,
                                     headers=headers,
                                     query_string=query_string,
                                     ssl=ssl)
             if 'content-length' in headers and \
                     int(headers['content-length']) == 0:
                 return conn
         with Timeout(self.node_timeout):
             resp = conn.getexpect()
         if resp.status == 100:
             return conn
         elif resp.status == 507:
             self.logger.error('507 Insufficient Storage in %s:%s%s' %
                               (host, port, path))
             raise Exception
     except:
         self.logger.error('Expect: 100-continue on %s:%s%s' %
                           (host, port, path))
         return None
Example #18
 def test_ref(self):
     err = Error()
     err_ref = weakref.ref(err)
     with Timeout(DELAY * 2, err):
         sleep(DELAY)
     del err
     assert not err_ref(), repr(err_ref())
Example #19
 def test_raising_custom_exception(self):
     # You can customize the exception raised:
     try:
         with Timeout(DELAY, IOError("Operation takes way too long")):
             sleep(DELAY * 2)
     except IOError as ex:
         assert str(ex) == "Operation takes way too long", repr(ex)
Example #20
    def portal_types(self):
        """ Mapping between history id and portal_type
        """
        if self._portal_types:
            return self._portal_types

        shadow = self.storage._getShadowStorage()
        histIds = shadow._storage

        self._portal_types = dict(PORTAL_TYPES.items())
        for hid in histIds.keys():
            if hid in self._portal_types:
                continue

            with Timeout(10):
                try:
                    ob = self.storage.retrieve(hid).object.object
                except (BrokenModified, POSKeyError):
                    logger.warn("BrokenModified raised for historyid: %s", hid)
                    continue
                except Exception as err:
                    logger.exception(err)
                    continue
            if not ob:
                logger.warn("Timeout raised for history id: %s", hid)
                continue

            if isinstance(ob, Removed):
                continue

            ptype = ob.getPortalTypeName()
            logger.warn("Adding hid - portal_type mapping: %s = %s", hid, ptype)
            self._portal_types[hid] = ptype

        return self._portal_types
Example #21
    def shrink_cluster(self, context, cluster_id, instance_ids):
        LOG.debug("begin shrink_cluster for MongoDB cluster %s", cluster_id)

        def _shrink_cluster():
            def all_instances_marked_deleted():
                non_deleted_instances = DBInstance.find_all(
                    cluster_id=cluster_id, deleted=False).all()
                non_deleted_ids = [db_instance.id for db_instance
                                   in non_deleted_instances]
                return not bool(
                    set(instance_ids).intersection(set(non_deleted_ids))
                )
            try:
                utils.poll_until(all_instances_marked_deleted,
                                 sleep_time=2,
                                 time_out=CONF.cluster_delete_time_out)
            except PollTimeOut:
                LOG.error(_("timeout for instances to be marked as deleted."))
                return

        cluster_usage_timeout = CONF.cluster_usage_timeout
        timeout = Timeout(cluster_usage_timeout)
        try:
            _shrink_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("timeout for shrinking cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("end shrink_cluster for MongoDB cluster %s", self.id)
Example #22
 def test_raising_self_true(self):
     # specifying True as the exception raises self as well
     try:
         with Timeout(DELAY, True) as t:
             sleep(DELAY*2)
     except Timeout as ex:
         assert ex is t, (ex, t)
Example #23
 def test_raising_self(self):
     # By default the Timeout instance itself is raised
     # if the block does not finish in time
     try:
         with Timeout(DELAY) as t:
             sleep(DELAY*2)
     except Timeout as ex:
         assert ex is t, (ex, t)
Example #24
    def account_update(self, req, account, container, broker):
        """
        Update the account server with latest container info.

        :param req: webob.Request object
        :param account: account name
        :param container: container name
        :param broker: container DB broker object
        :returns: if the account request returns a 404 error code,
                  HTTPNotFound response object, otherwise None.
        """
        account_host = req.headers.get('X-Account-Host')
        account_partition = req.headers.get('X-Account-Partition')
        account_device = req.headers.get('X-Account-Device')
        if all([account_host, account_partition, account_device]):
            account_ip, account_port = account_host.rsplit(':', 1)
            new_path = '/' + '/'.join([account, container])
            info = broker.get_info()
            account_headers = {
                'x-put-timestamp': info['put_timestamp'],
                'x-delete-timestamp': info['delete_timestamp'],
                'x-object-count': info['object_count'],
                'x-bytes-used': info['bytes_used'],
                'x-cf-trans-id': req.headers.get('X-Cf-Trans-Id', '-')
            }
            if req.headers.get('x-account-override-deleted', 'no').lower() == \
                    'yes':
                account_headers['x-account-override-deleted'] = 'yes'
            try:
                with ConnectionTimeout(self.conn_timeout):
                    conn = http_connect(account_ip, account_port,
                                        account_device, account_partition,
                                        'PUT', new_path, account_headers)
                with Timeout(self.node_timeout):
                    account_response = conn.getresponse()
                    account_response.read()
                    if account_response.status == 404:
                        return HTTPNotFound(request=req)
                    elif account_response.status < 200 or \
                            account_response.status > 299:
                        self.logger.error(
                            _('ERROR Account update failed '
                              'with %(ip)s:%(port)s/%(device)s (will retry '
                              'later): Response %(status)s %(reason)s'), {
                                  'ip': account_ip,
                                  'port': account_port,
                                  'device': account_device,
                                  'status': account_response.status,
                                  'reason': account_response.reason
                              })
            except (Exception, TimeoutError):
                self.logger.exception(
                    _('ERROR account update failed with '
                      '%(ip)s:%(port)s/%(device)s (will retry later)'), {
                          'ip': account_ip,
                          'port': account_port,
                          'device': account_device
                      })
        return None
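Like several of the Swift-derived examples here, account_update uses two separate deadlines: a short ConnectionTimeout around establishing the connection and a node timeout around reading the response. A rough sketch of the same split using the standard library's http.client (assuming eventlet green sockets via monkey_patch; names are illustrative, not Swift's API):

import eventlet
eventlet.monkey_patch()  # make socket I/O cooperative so Timeout can interrupt it

import http.client
from eventlet import Timeout

def put_with_deadlines(host, port, path, headers, conn_timeout, node_timeout):
    with Timeout(conn_timeout):
        conn = http.client.HTTPConnection(host, port)
        conn.request('PUT', path, headers=headers)
    with Timeout(node_timeout):
        resp = conn.getresponse()
        resp.read()
    conn.close()
    return resp.status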
Example #25
    def create_cluster(self, context, cluster_id):
        LOG.debug("Begin create_cluster for id: %s." % cluster_id)

        def _create_cluster():

            # Fetch instances by cluster_id against instances table.
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]

            # Wait for cluster members to get to cluster-ready status.
            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            LOG.debug("All members ready, proceeding for cluster setup.")
            instances = [Instance.load(context, instance_id) for instance_id
                         in instance_ids]

            # Connect nodes to the first node
            guests = [self.get_guest(instance) for instance in instances]
            try:
                cluster_head = instances[0]
                cluster_head_port = '6379'
                cluster_head_ip = self.get_ip(cluster_head)
                for guest in guests[1:]:
                    guest.cluster_meet(cluster_head_ip, cluster_head_port)

                num_nodes = len(instances)
                total_slots = 16384
                slots_per_node = total_slots // num_nodes
                leftover_slots = total_slots % num_nodes
                first_slot = 0
                for guest in guests:
                    last_slot = first_slot + slots_per_node
                    if leftover_slots > 0:
                        leftover_slots -= 1
                    else:
                        last_slot -= 1
                    guest.cluster_addslots(first_slot, last_slot)
                    first_slot = last_slot + 1

                for guest in guests:
                    guest.cluster_complete()
            except Exception:
                LOG.exception(_("Error creating cluster."))
                self.update_statuses_on_failure(cluster_id)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _create_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for building cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("End create_cluster for id: %s." % cluster_id)
Example #26
def write_html_currency_pages():
    global ba
    today = datetime.datetime.today()

    template_file_path = os.path.join(ba.server.WWW_DOCUMENT_ROOT, '_currency_page_template.htm')
    with open(template_file_path, 'r') as template_file:
        template = template_file.read()

    api_all_url = '{}ticker/all'.format(ba.server.API_INDEX_URL)

    try:
        with Timeout(API_CALL_TIMEOUT_THRESHOLD, CallTimeoutException):
            response = urllib2.urlopen(urllib2.Request(url=api_all_url, headers=API_REQUEST_HEADERS)).read()
            all_rates = json.loads(response)
    except (CallTimeoutException,
            socket.error,
            urllib2.URLError,
            httplib.BadStatusLine,
            simplejson.decoder.JSONDecodeError):
        return None

    if not os.path.exists(os.path.join(ba.server.WWW_DOCUMENT_ROOT, ba.config.CURRENCY_DUMMY_PAGES_SUBFOLDER_NAME)):
        os.makedirs(os.path.join(ba.server.WWW_DOCUMENT_ROOT, ba.config.CURRENCY_DUMMY_PAGES_SUBFOLDER_NAME))

    for currency_code in ba.config.CURRENCY_LIST:
        currency_rate = all_rates[currency_code]['last']
        currency_page_contents = template
        currency_page_contents = currency_page_contents.replace('$RATE$', str(Decimal(currency_rate).quantize(ba.config.DEC_PLACES)))
        currency_page_contents = currency_page_contents.replace('$CURRENCY_CODE$', currency_code)
        currency_page_contents = currency_page_contents.replace('$GENERATION_DATETIME$', today.strftime('%Y-%m-%dT%H:%M'))

        with open(os.path.join(ba.server.WWW_DOCUMENT_ROOT,
                               ba.config.CURRENCY_DUMMY_PAGES_SUBFOLDER_NAME,
                               ('%s.htm' % currency_code.lower())), 'w') as currency_page_file:
            currency_page_file.write(currency_page_contents)

    template_file_path = os.path.join(ba.server.WWW_DOCUMENT_ROOT, '_charts_page_template.htm')
    with open(template_file_path, 'r') as template_file:
        template = template_file.read()

    if not os.path.exists(os.path.join(ba.server.WWW_DOCUMENT_ROOT, ba.config.CHARTS_DUMMY_PAGES_SUBFOLDER_NAME)):
        os.makedirs(os.path.join(ba.server.WWW_DOCUMENT_ROOT, ba.config.CHARTS_DUMMY_PAGES_SUBFOLDER_NAME))

    index = 0
    for currency_code in ba.config.CURRENCY_LIST:
        currency_rate = all_rates[currency_code]['last']
        chart_page_contents = template
        chart_page_contents = chart_page_contents.replace('$RATE$', str(Decimal(currency_rate).quantize(ba.config.DEC_PLACES)))
        chart_page_contents = chart_page_contents.replace('$CURRENCY_CODE$', currency_code)
        chart_page_contents = chart_page_contents.replace('$GENERATION_DATETIME$', today.strftime('%Y-%m-%dT%H:%M'))
        with open(os.path.join(ba.server.WWW_DOCUMENT_ROOT,
                               ba.config.CHARTS_DUMMY_PAGES_SUBFOLDER_NAME,
                               ('%s.htm' % currency_code.lower())), 'w') as chart_page_file:
            chart_page_file.write(chart_page_contents)


        index = index + 1
        if index == ba.config.FRONTEND_MAJOR_CURRENCIES:
            break
Example #27
 def fetch(self, url, header):
     # Fetch HTTP page with eventlet pool
     response = ''
     with Timeout(60, False):
         req = Request(url, None, header)
         response = urlopen(req).read()
         response = response.decode()
     return response
Example #28
 def _wait_for_read_with_timeout(self, fd):
     r, w, e = select.select([fd], [], [], self.timeout)
     if len(r) == 0:
         if self.task_id:
             self._cancel()
         raise Timeout('Timeout while waiting for storlet output')
     if fd in r:
         return
Example #29
def write_fiat_rates_config():
    global ba
    js_config_template = "var fiatCurrencies = $FIAT_CURRENCIES_DATA$;"

    currencies_names_URL = 'http://openexchangerates.org/api/currencies.json'
    currencies_rates_URL = 'http://openexchangerates.org/api/latest.json?app_id={app_id}'.format(
        app_id=OPENEXCHANGERATES_APP_ID)

    currency_data_list = {}

    try:
        with Timeout(API_CALL_TIMEOUT_THRESHOLD, CallTimeoutException):
            response = urllib2.urlopen(
                urllib2.Request(url=currencies_names_URL,
                                headers=API_REQUEST_HEADERS)).read()
            currencies_names = json.loads(response)

        with Timeout(API_CALL_TIMEOUT_THRESHOLD, CallTimeoutException):
            response = urllib2.urlopen(
                urllib2.Request(url=currencies_rates_URL,
                                headers=API_REQUEST_HEADERS)).read()
            currencies_rates = json.loads(response)
    except (CallTimeoutException, socket.error, urllib2.URLError,
            httplib.BadStatusLine, ValueError):
        return None

    for currency_code in currencies_names:
        try:
            currency_data_list[currency_code] = {
                'name': currencies_names[currency_code],
                'rate': str(currencies_rates['rates'][currency_code]),
            }
        except (KeyError, TypeError):
            return None

    config_string = js_config_template
    config_string = config_string.replace('$FIAT_CURRENCIES_DATA$',
                                          json.dumps(currency_data_list))

    with open(os.path.join(ba.server.WWW_DOCUMENT_ROOT, 'js', 'fiat_data.js'),
              'w') as fiat_exchange_config_file:
        fiat_exchange_config_file.write(config_string)

    with open(os.path.join(ba.server.API_DOCUMENT_ROOT, 'fiat_data'),
              'w') as fiat_exchange_api_file:
        fiat_exchange_api_file.write(json.dumps(currency_data_list))
Example #30
    def test_spin(self):
        "Tests the Spin action"
        # Set the balancer up to return a Spin
        balancer = MockBalancer()
        action = Spin(balancer,
                      "aeracode.org",
                      "aeracode.org",
                      timeout=2,
                      check_interval=1)
        balancer.fixed_action = action
        # Ensure it times out
        sock = MockSocket()
        try:
            with Timeout(2.2):
                start = time.time()
                action.handle(sock, "", "/", {})
                duration = time.time() - start
        except Timeout:
            self.fail("Spin lasted for too long")
        self.assert_(duration >= 1, "Spin did not last for long enough")
        self.assertEqual(
            open(
                os.path.join(os.path.dirname(__file__), "..", "static",
                             "timeout.http")).read(),
            sock.data,
        )
        # Now, ensure it picks up a change
        sock = MockSocket()
        try:
            with Timeout(2):

                def host_changer():
                    eventlet.sleep(0.7)
                    balancer.fixed_action = Empty(balancer,
                                                  "aeracode.org",
                                                  "aeracode.org",
                                                  code=402)

                eventlet.spawn(host_changer)
                action.handle(sock, "", "/", {})
        except Timeout:
            self.fail("Spin lasted for too long")
        self.assertEqual(
            "HTTP/1.0 402 Payment Required\r\nConnection: close\r\nContent-length: 0\r\n\r\n",
            sock.data,
        )