Example #1
def read_metadata_info(config, cloud_config):
    url = "https://{}:{}/metadata".format(config.host, config.port)
    timeout = cloud_config.get('connect_timeout', 5)
    try:
        response = urlopen(url, context=config.ssl_context, timeout=timeout)
    except Exception as e:
        log.exception(e)
        raise DriverException("Unable to connect to the metadata service at %s. "
                              "Check the cluster status in the Constellation cloud console. " % url)

    if response.code != 200:
        raise DriverException(("Error while fetching the metadata at: %s. "
                               "The service returned error code %d." % (url, response.code)))
    return parse_metadata_info(config, response.read().decode('utf-8'))
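Both failure modes above (a failed connection and a non-200 response) surface as DriverException, so callers only need a single except clause. A minimal sketch, assuming config and cloud_config are already populated:

try:
    config = read_metadata_info(config, cloud_config)
except DriverException as exc:
    log.error("Could not fetch cluster metadata: %s", exc)
    raise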
Example #2
def recv_results_rows(cls, f, protocol_version, user_type_map,
                      result_metadata):
    paging_state, column_metadata, result_metadata_id = cls.recv_results_metadata(
        f, user_type_map)
    column_metadata = column_metadata or result_metadata
    rowcount = read_int(f)
    rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)]
    colnames = [c[2] for c in column_metadata]
    coltypes = [c[3] for c in column_metadata]
    try:
        parsed_rows = [
            tuple(
                ctype.from_binary(val, protocol_version)
                for ctype, val in zip(coltypes, row)) for row in rows
        ]
    except Exception:
        # Decoding failed in the bulk pass above; decode column by column to
        # pinpoint the offending column and report its name and CQL type.
        for row in rows:
            for i in range(len(row)):
                try:
                    coltypes[i].from_binary(row[i], protocol_version)
                except Exception as e:
                    raise DriverException(
                        'Failed decoding result column "%s" of type %s: %s'
                        % (colnames[i],
                           coltypes[i].cql_parameterized_type(), str(e)))
        # If no single column reproduces the failure, re-raise the original
        # error instead of falling through with parsed_rows undefined.
        raise
    return paging_state, coltypes, (colnames,
                                    parsed_rows), result_metadata_id
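The indexing above (c[2] for the name, c[3] for the type) assumes each column_metadata entry is a tuple of at least four elements: keyspace, table, column name and column type. A hypothetical entry, shown only to make those indices concrete:

from cassandra.cqltypes import UUIDType

# Illustrative metadata entry; the keyspace/table/column names are made up.
column_metadata = [
    ('my_keyspace', 'my_table', 'user_id', UUIDType),  # c[2] -> 'user_id', c[3] -> UUIDType
]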
Example #3
def recv_results_rows(self, f, protocol_version, user_type_map,
                      result_metadata):
    self.recv_results_metadata(f, user_type_map)
    column_metadata = self.column_metadata or result_metadata
    rowcount = read_int(f)
    rows = [
        self.recv_row(f, len(column_metadata)) for _ in range(rowcount)
    ]
    self.column_names = [c[2] for c in column_metadata]
    self.column_types = [c[3] for c in column_metadata]
    try:
        self.parsed_rows = [
            tuple(
                ctype.from_binary(val, protocol_version)
                for ctype, val in zip(self.column_types, row))
            for row in rows
        ]
    except Exception:
        # Decode column by column to identify the failing column and report
        # its name and CQL type in the error message.
        for row in rows:
            for i in range(len(row)):
                try:
                    self.column_types[i].from_binary(
                        row[i], protocol_version)
                except Exception as e:
                    raise DriverException(
                        'Failed decoding result column "%s" of type %s: %s'
                        % (self.column_names[i],
                           self.column_types[i].cql_parameterized_type(),
                           str(e)))
        # Re-raise the original error if no single column reproduces it, so
        # self.parsed_rows is never silently left unset.
        raise
Example #4
def recv(self, f, protocol_version, user_type_map, result_metadata):
    if self.kind == RESULT_KIND_VOID:
        return
    elif self.kind == RESULT_KIND_ROWS:
        self.recv_results_rows(f, protocol_version, user_type_map,
                               result_metadata)
    elif self.kind == RESULT_KIND_SET_KEYSPACE:
        self.new_keyspace = read_string(f)
    elif self.kind == RESULT_KIND_PREPARED:
        self.recv_results_prepared(f, protocol_version, user_type_map)
    elif self.kind == RESULT_KIND_SCHEMA_CHANGE:
        self.recv_results_schema_change(f, protocol_version)
    else:
        raise DriverException("Unknown RESULT kind: %d" % self.kind)
Example #5
def get_cloud_config(cloud_config, create_pyopenssl_context=False):
    if not _HAS_SSL:
        raise DriverException("A Python installation with SSL is required to connect to a cloud cluster.")

    if 'secure_connect_bundle' not in cloud_config:
        raise ValueError("The cloud config doesn't have a secure_connect_bundle specified.")

    try:
        config = read_cloud_config_from_zip(cloud_config, create_pyopenssl_context)
    except BadZipFile:
        raise ValueError("Unable to open the zip file for the cloud config. Check your secure connect bundle.")

    config = read_metadata_info(config, cloud_config)
    if create_pyopenssl_context:
        config.ssl_context = config.pyopenssl_context
    return config
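A minimal usage sketch: the only key get_cloud_config requires is secure_connect_bundle (the path below is illustrative):

cloud_config = {'secure_connect_bundle': '/path/to/secure-connect-bundle.zip'}
config = get_cloud_config(cloud_config, create_pyopenssl_context=True)
# config now carries ssl_context, local_dc, sni_host, sni_port and host_ids
# (see parse_metadata_info in Example #8).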
Example #6
    def encode_header(self, buffer, payload_length, uncompressed_length, is_self_contained):
        if payload_length > Segment.MAX_PAYLOAD_LENGTH:
            raise DriverException('Payload length exceeds Segment.MAX_PAYLOAD_LENGTH')

        header_data = payload_length

        flag_offset = self.FLAG_OFFSET
        if self.compression:
            header_data |= uncompressed_length << flag_offset
            flag_offset += 17

        if is_self_contained:
            header_data |= 1 << flag_offset

        write_uint_le(buffer, header_data, size=self.header_length)
        header_crc = compute_crc24(header_data, self.header_length)
        write_uint_le(buffer, header_crc, size=CRC24_LENGTH)
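encode_header packs the payload length into the low bits of header_data, optionally the uncompressed length above it, and the self-contained flag on top. A decoding sketch of that layout, assuming FLAG_OFFSET is 17 (a 17-bit payload length, matching the 17-bit shift applied in the compressed case):

def decode_header_bits(header_data, compression=False):
    payload_length = header_data & 0x1FFFF            # low 17 bits
    flag_offset = 17
    uncompressed_length = 0
    if compression:
        uncompressed_length = (header_data >> flag_offset) & 0x1FFFF
        flag_offset += 17
    is_self_contained = bool((header_data >> flag_offset) & 1)
    return payload_length, uncompressed_length, is_self_contained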
Example #7
def recv_body(cls, f, protocol_version, user_type_map, result_metadata):
    kind = read_int(f)
    paging_state = None
    col_types = None
    if kind == RESULT_KIND_VOID:
        results = None
    elif kind == RESULT_KIND_ROWS:
        paging_state, col_types, results, result_metadata_id = cls.recv_results_rows(
            f, protocol_version, user_type_map, result_metadata)
    elif kind == RESULT_KIND_SET_KEYSPACE:
        ksname = read_string(f)
        results = ksname
    elif kind == RESULT_KIND_PREPARED:
        results = cls.recv_results_prepared(f, protocol_version, user_type_map)
    elif kind == RESULT_KIND_SCHEMA_CHANGE:
        results = cls.recv_results_schema_change(f, protocol_version)
    else:
        raise DriverException("Unknown RESULT kind: %d" % kind)
    return cls(kind, results, paging_state, col_types)
Example #8
def parse_metadata_info(config, http_data):
    try:
        data = json.loads(http_data)
    except Exception:
        msg = "Failed to load cluster metadata"
        raise DriverException(msg)

    contact_info = data['contact_info']
    config.local_dc = contact_info['local_dc']

    proxy_info = contact_info['sni_proxy_address'].split(':')
    config.sni_host = proxy_info[0]
    try:
        config.sni_port = int(proxy_info[1])
    except (IndexError, ValueError):  # no port given, or not an integer
        config.sni_port = 9042

    config.host_ids = [host_id for host_id in contact_info['contact_points']]

    return config
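parse_metadata_info only touches a few keys of the metadata document. A hypothetical payload containing just those keys, based on the lookups above (config here is whatever cloud config object the caller already has):

http_data = '''{
  "contact_info": {
    "local_dc": "dc-1",
    "sni_proxy_address": "proxy.example.com:9042",
    "contact_points": ["host-id-1", "host-id-2"]
  }
}'''
config = parse_metadata_info(config, http_data)
# config.sni_host == 'proxy.example.com', config.sni_port == 9042,
# config.host_ids == ['host-id-1', 'host-id-2']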
Example #9
    def write_version_and_event(self,
                                new_stored_event,
                                new_version_number=None,
                                max_retries=3,
                                artificial_failure_rate=0):
        """
        Writes the entity version if it does not already exist, then writes the stored event.
        """
        # Write the next version.
        stored_entity_id = new_stored_event.stored_entity_id
        new_entity_version = None
        if self.always_write_entity_version and new_version_number is not None:
            assert isinstance(new_version_number, six.integer_types)
            #  - uses the "if not exists" optimistic concurrency control feature
            #    of Cassandra, hence this operation is assumed to succeed only once
            new_entity_version_id = self.make_entity_version_id(
                stored_entity_id, new_version_number)
            new_entity_version = CqlEntityVersion(
                r=new_entity_version_id,
                v=new_stored_event.event_id,
            )
            try:
                new_entity_version.save()
            except LWTException as e:
                raise ConcurrencyError(
                    "Version {} of entity {} already exists: {}".format(
                        new_entity_version, stored_entity_id, e))

        # Increased latency here causes increased contention.
        #  - used for testing concurrency exceptions
        if artificial_failure_rate:
            sleep(artificial_failure_rate)

        # Write the stored event into the database.
        try:
            retries = max_retries
            while True:
                try:
                    # Instantiate a Cassandra CQL engine object.
                    cql_stored_event = to_cql(new_stored_event)

                    # Optionally mimic an unreliable save() operation.
                    #  - used for testing retries
                    if artificial_failure_rate and (
                            random() > 1 - artificial_failure_rate):
                        raise DriverException("Artificial failure")

                    # Save the event.
                    cql_stored_event.save()

                except DriverException:

                    if retries <= 0:
                        # Raise the error after retries exhausted.
                        raise
                    else:
                        # Otherwise retry.
                        retries -= 1
                        sleep(0.05 + 0.1 * random())
                else:
                    break

        except DriverException as event_write_error:
            # If we get here, we're in trouble because the version has been
            # written, but perhaps not the event, so the entity may be broken
            # because it might not be possible to get the entity with version
            # number high enough to pass the optimistic concurrency check
            # when storing subsequent events.

            # Back off for a little bit.
            sleep(0.1)
            try:
                # If the event actually exists, despite the exception, all is well.
                CqlStoredEvent.get(n=new_stored_event.stored_entity_id,
                                   v=new_stored_event.event_id)

            except CqlStoredEvent.DoesNotExist:
                # Otherwise, try harder to recover by removing the new version.
                if new_entity_version is not None:
                    retries = max_retries * 3
                    while True:
                        try:
                            # Optionally mimic an unreliable delete() operation.
                            #  - used for testing retries
                            if artificial_failure_rate and (
                                    random() > 1 - artificial_failure_rate):
                                raise DriverException("Artificial failure")

                            # Delete the new entity version.
                            new_entity_version.delete()

                        except DriverException as version_delete_error:
                            # It's not going very well, so maybe retry.
                            if retries <= 0:
                                # Raise when retries are exhausted.
                                raise Exception(
                                    "Unable to delete version {} of entity {} after failing to write"
                                    "event: event write error {}: version delete error {}"
                                    .format(new_entity_version,
                                            stored_entity_id,
                                            event_write_error,
                                            version_delete_error))
                            else:
                                # Otherwise retry.
                                retries -= 1
                                sleep(0.05 + 0.1 * random())
                        else:
                            # The entity version was deleted, all is well.
                            break
                raise event_write_error
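The same retry-with-jitter loop appears twice in this method (saving the event, then deleting the version during recovery). Isolated below purely as a sketch; the helper name is hypothetical and not part of the original code:

def _retry_on_driver_exception(operation, max_retries=3):
    retries = max_retries
    while True:
        try:
            return operation()
        except DriverException:
            if retries <= 0:
                raise  # give up once the retries are exhausted
            retries -= 1
            sleep(0.05 + 0.1 * random())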