Пример #1
0
    def scan(self,
             context,
             table_name,
             condition_map,
             attributes_to_get=None,
             limit=None,
             exclusive_start_key=None,
             consistent=False):
        """Scan a table with the given filter conditions.

        Validates the table state (and the start key, when paging) before
        delegating to the storage driver under the task semaphore. Emits
        SCAN_START/SCAN_END notifications around the driver call.
        """
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)

        # A pagination start key must match the table's key schema.
        if exclusive_start_key is not None:
            self._validate_table_schema(table_info, exclusive_start_key)

        event_payload = {
            'table_name': table_name,
            'condition_map': condition_map,
            'attributes_to_get': attributes_to_get,
            'limit': limit,
            'exclusive_start_key': exclusive_start_key,
            'consistent': consistent,
        }
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SCAN_START,
                        event_payload)

        with self.__task_semaphore:
            scan_result = self._storage_driver.scan(
                context, table_info, condition_map, attributes_to_get,
                limit, exclusive_start_key, consistent)
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SCAN_END,
                        event_payload)

        return scan_result
Пример #2
0
    def _delete_item_async(self, context, table_info, key_attribute_map,
                           expected_condition_map=None):
        """Schedule an asynchronous item delete.

        Emits DELETEITEM_START before scheduling; the completion callback
        emits DELETEITEM_END on success or DELETEITEM_ERROR (with the
        exception as the event payload) on failure.

        :param table_info: table descriptor (only ``name`` is read here)
        :param key_attribute_map: key attributes identifying the item
        :param expected_condition_map: optional conditional-delete conditions
        :returns: the future produced by ``_execute_async``
        """
        payload = dict(
            table_name=table_info.name,
            key_attribute_map=key_attribute_map,
            expected_condition_map=expected_condition_map
        )
        notifier.notify(context, notifier.EVENT_TYPE_DATA_DELETEITEM_START,
                        payload)

        del_future = self._execute_async(
            self._storage_driver.delete_item,
            context, table_info, key_attribute_map, expected_condition_map
        )

        def callback(future):
            # END only when the driver call succeeded; otherwise report the
            # exception itself as the error-event payload.
            if not future.exception():
                notifier.notify(
                    context, notifier.EVENT_TYPE_DATA_DELETEITEM_END,
                    payload
                )
            else:
                notifier.notify(
                    context, notifier.EVENT_TYPE_DATA_DELETEITEM_ERROR,
                    future.exception(), priority=notifier.PRIORITY_ERROR
                )

        del_future.add_done_callback(callback)
        return del_future
Пример #3
0
    def put_item(self, context, table_name, attribute_map, return_values=None,
                 if_not_exist=False, expected_condition_map=None):
        """Store a single item in the table.

        Validates the table state and the full attribute map against the
        schema, performs the driver put under the task semaphore, then emits
        a debug-priority PUTITEM notification describing the request.
        """
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)
        self._validate_table_schema(table_info, attribute_map, keys_only=False)

        with self.__task_semaphore:
            put_result = self._storage_driver.put_item(
                context, table_info, attribute_map, return_values,
                if_not_exist, expected_condition_map)

        event_payload = {
            'table_name': table_name,
            'attribute_map': attribute_map,
            'return_values': return_values,
            'if_not_exist': if_not_exist,
            'expected_condition_map': expected_condition_map,
        }
        notifier.notify(context, notifier.EVENT_TYPE_DATA_PUTITEM,
                        event_payload, priority=notifier.PRIORITY_DEBUG)

        return put_result
Пример #4
0
    def _get_item_async(self,
                        context,
                        table_info,
                        hash_key,
                        range_key,
                        attributes_to_get,
                        consistent=True):
        """Schedule an asynchronous single-item select by exact key.

        :param hash_key: hash key value (matched with equality)
        :param range_key: optional range key value (equality), or None
        :param attributes_to_get: attribute names to return, or None for all
        :returns: the future produced by ``_execute_async``
        """
        payload = dict(table_name=table_info.name,
                       hash_key=hash_key,
                       range_key=range_key,
                       attributes_to_get=attributes_to_get,
                       consistent=consistent)
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SELECTITEM_START,
                        payload)
        # Select everything unless a specific attribute list was requested.
        select_type = (SelectType.all() if attributes_to_get is None else
                       SelectType.specific_attributes(attributes_to_get))
        hash_key_condition_list = [IndexedCondition.eq(hash_key)]
        range_key_condition_list = (None if range_key is None else
                                    [IndexedCondition.eq(range_key)])

        result = self._execute_async(self._storage_driver.select_item,
                                     context,
                                     table_info,
                                     hash_key_condition_list,
                                     range_key_condition_list,
                                     select_type,
                                     consistent=consistent)
        # NOTE(review): SELECTITEM_END is emitted right after scheduling,
        # not when the future completes -- confirm this ordering is intended.
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SELECTITEM_END,
                        payload)
        return result
Пример #5
0
    def _delete_item_async(self,
                           context,
                           table_info,
                           key_attribute_map,
                           expected_condition_map=None):
        """Schedule an asynchronous item delete.

        Emits DELETEITEM_START before scheduling; the completion callback
        emits DELETEITEM_END on success or DELETEITEM_ERROR (with the
        exception as the event payload) on failure.

        :returns: the future produced by ``_execute_async``
        """
        payload = dict(table_name=table_info.name,
                       key_attribute_map=key_attribute_map,
                       expected_condition_map=expected_condition_map)
        notifier.notify(context, notifier.EVENT_TYPE_DATA_DELETEITEM_START,
                        payload)

        del_future = self._execute_async(self._storage_driver.delete_item,
                                         context, table_info,
                                         key_attribute_map,
                                         expected_condition_map)

        def callback(future):
            # END only on success; on failure the exception object itself is
            # the error-event payload.
            if not future.exception():
                notifier.notify(context,
                                notifier.EVENT_TYPE_DATA_DELETEITEM_END,
                                payload)
            else:
                notifier.notify(context,
                                notifier.EVENT_TYPE_DATA_DELETEITEM_ERROR,
                                future.exception(),
                                priority=notifier.PRIORITY_ERROR)

        del_future.add_done_callback(callback)
        return del_future
Пример #6
0
    def scan(self, context, table_name, condition_map, attributes_to_get=None,
             limit=None, exclusive_start_key=None,
             consistent=False):
        """Scan a table with the given filter conditions.

        Emits SCAN_START/SCAN_END notifications around the driver call,
        which runs under the task semaphore.

        :param condition_map: attribute-name -> condition filter map
        :param exclusive_start_key: pagination start key (validated against
            the table schema when present)
        :returns: whatever the storage driver's ``scan`` returns
        """
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)

        if exclusive_start_key is not None:
            self._validate_table_schema(table_info, exclusive_start_key)

        payload = dict(table_name=table_name,
                       condition_map=condition_map,
                       attributes_to_get=attributes_to_get,
                       limit=limit,
                       exclusive_start_key=exclusive_start_key,
                       consistent=consistent)
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SCAN_START,
                        payload)

        with self.__task_semaphore:
            result = self._storage_driver.scan(
                context, table_info, condition_map, attributes_to_get,
                limit, exclusive_start_key, consistent
            )
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SCAN_END,
                        payload)

        return result
Пример #7
0
    def _put_item_async(self,
                        context,
                        table_info,
                        attribute_map,
                        return_values=None,
                        if_not_exist=False,
                        expected_condition_map=None):
        """Schedule an asynchronous item put.

        Emits PUTITEM_START before scheduling; the completion callback emits
        PUTITEM_END on success or PUTITEM_ERROR (with the exception as the
        event payload) on failure.

        :returns: the future produced by ``_execute_async``
        """
        payload = dict(table_name=table_info.name,
                       attribute_map=attribute_map,
                       return_values=return_values,
                       if_not_exist=if_not_exist,
                       expected_condition_map=expected_condition_map)
        notifier.notify(context, notifier.EVENT_TYPE_DATA_PUTITEM_START,
                        payload)

        put_future = self._execute_async(self._storage_driver.put_item,
                                         context, table_info, attribute_map,
                                         return_values, if_not_exist,
                                         expected_condition_map)

        def callback(future):
            if not future.exception():
                notifier.notify(context, notifier.EVENT_TYPE_DATA_PUTITEM_END,
                                payload)
            else:
                # Bug fix: previously reported EVENT_TYPE_DATA_DELETEITEM_ERROR
                # for a failed *put*, and passed the exception as a keyword
                # named "payload". Use the put-specific error event and pass
                # the exception positionally like the delete counterpart does.
                notifier.notify(context,
                                notifier.EVENT_TYPE_DATA_PUTITEM_ERROR,
                                future.exception(),
                                priority=notifier.PRIORITY_ERROR)

        put_future.add_done_callback(callback)
        return put_future
Пример #8
0
    def _do_delete_table(self, context, table_info):
        """Delete the table in the backend, drop its info record and emit
        the TABLE_DELETE_END notification.
        """
        self._storage_driver.delete_table(context, table_info)

        self._table_info_repo.delete(context, table_info.name)

        notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_END,
                        table_info.name)
Пример #9
0
    def put_item(self,
                 context,
                 table_name,
                 attribute_map,
                 return_values=None,
                 if_not_exist=False,
                 expected_condition_map=None):
        """Store a single item in the table.

        Validates the table state and the full attribute map against the
        schema, performs the driver put under the task semaphore, then emits
        a debug-priority PUTITEM notification describing the request.

        :returns: whatever the storage driver's ``put_item`` returns
        """
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)
        self._validate_table_schema(table_info, attribute_map, keys_only=False)

        with self.__task_semaphore:
            result = self._storage_driver.put_item(context, table_info,
                                                   attribute_map,
                                                   return_values, if_not_exist,
                                                   expected_condition_map)
        notifier.notify(context,
                        notifier.EVENT_TYPE_DATA_PUTITEM,
                        dict(table_name=table_name,
                             attribute_map=attribute_map,
                             return_values=return_values,
                             if_not_exist=if_not_exist,
                             expected_condition_map=expected_condition_map),
                        priority=notifier.PRIORITY_DEBUG)

        return result
Пример #10
0
    def _put_item_async(self, context, table_info, attribute_map,
                        return_values=None, if_not_exist=False,
                        expected_condition_map=None):
        """Schedule an asynchronous item put.

        Emits PUTITEM_START before scheduling; the completion callback emits
        PUTITEM_END on success or PUTITEM_ERROR (with the exception as the
        event payload) on failure.

        :returns: the future produced by ``_execute_async``
        """
        payload = dict(
            table_name=table_info.name,
            attribute_map=attribute_map,
            return_values=return_values,
            if_not_exist=if_not_exist,
            expected_condition_map=expected_condition_map
        )
        notifier.notify(context, notifier.EVENT_TYPE_DATA_PUTITEM_START,
                        payload)

        put_future = self._execute_async(
            self._storage_driver.put_item,
            context, table_info, attribute_map, return_values,
            if_not_exist, expected_condition_map
        )

        def callback(future):
            if not future.exception():
                notifier.notify(
                    context, notifier.EVENT_TYPE_DATA_PUTITEM_END,
                    payload
                )
            else:
                # Bug fix: previously reported EVENT_TYPE_DATA_DELETEITEM_ERROR
                # for a failed *put*, and passed the exception as a keyword
                # named "payload". Use the put-specific error event and pass
                # the exception positionally like the delete counterpart does.
                notifier.notify(
                    context, notifier.EVENT_TYPE_DATA_PUTITEM_ERROR,
                    future.exception(), priority=notifier.PRIORITY_ERROR
                )

        put_future.add_done_callback(callback)
        return put_future
Пример #11
0
    def _do_delete_table(self, context, table_info):
        """Delete the table in the backend, drop its info record and emit
        the TABLE_DELETE_END notification.
        """
        self._storage_driver.delete_table(context, table_info)

        self._table_info_repo.delete(context, table_info.name)

        notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_END,
                        table_info.name)
Пример #12
0
    def _get_item_async(self, context, table_info, hash_key, range_key,
                        attributes_to_get, consistent=True):
        """Schedule an asynchronous single-item select by exact key.

        :param hash_key: hash key value (matched with equality)
        :param range_key: optional range key value (equality), or None
        :param attributes_to_get: attribute names to return, or None for all
        :returns: the future produced by ``_execute_async``
        """
        payload = dict(table_name=table_info.name,
                       hash_key=hash_key,
                       range_key=range_key,
                       attributes_to_get=attributes_to_get,
                       consistent=consistent)
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SELECTITEM_START,
                        payload)
        # Select everything unless a specific attribute list was requested.
        select_type = (
            SelectType.all() if attributes_to_get is None else
            SelectType.specific_attributes(attributes_to_get)
        )
        hash_key_condition_list = [IndexedCondition.eq(hash_key)]
        range_key_condition_list = (
            None if range_key is None else [IndexedCondition.eq(range_key)]
        )

        result = self._execute_async(
            self._storage_driver.select_item,
            context, table_info, hash_key_condition_list,
            range_key_condition_list, select_type, consistent=consistent
        )
        # NOTE(review): SELECTITEM_END is emitted right after scheduling,
        # not when the future completes -- confirm this ordering is intended.
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SELECTITEM_END,
                        payload)
        return result
Пример #13
0
 def callback(future):
     """Put-item completion hook: emit PUTITEM_END on success, otherwise
     PUTITEM_ERROR with the exception as the event payload.
     """
     if not future.exception():
         notifier.notify(context, notifier.EVENT_TYPE_DATA_PUTITEM_END,
                         payload)
     else:
         # Bug fix: previously reported EVENT_TYPE_DATA_DELETEITEM_ERROR for
         # a failed *put*, and passed the exception as a keyword named
         # "payload". Use the put-specific error event and pass the exception
         # positionally, matching the delete-path callbacks.
         notifier.notify(context,
                         notifier.EVENT_TYPE_DATA_PUTITEM_ERROR,
                         future.exception(),
                         priority=notifier.PRIORITY_ERROR)
Пример #14
0
    def execute_write_batch(self, context, write_request_map):
        """Execute a batch of put/delete write requests.

        Validates every request against its table schema, dispatches the
        requests asynchronously in chunks of ``_batch_chunk_size``, and
        collects the requests the backend could not process, grouped by
        table name. Emits BATCHWRITE_START/END notifications.

        :param write_request_map: dict of table_name -> list of write requests
        :returns: dict of table_name -> list of unprocessed write requests
        """
        notifier.notify(context, notifier.EVENT_TYPE_DATA_BATCHWRITE_START,
                        write_request_map)
        write_request_list_to_send = []
        # items()/range() instead of the Python-2-only iteritems()/xrange()
        # keep the method working on both Python 2 and Python 3.
        for table_name, write_request_list in write_request_map.items():
            table_info = self._table_info_repo.get(context, table_name)
            for req in write_request_list:
                self._validate_table_is_active(table_info)

                # Puts must match the full schema; deletes only the keys.
                if req.is_put:
                    self._validate_table_schema(table_info, req.attribute_map,
                                                keys_only=False)
                else:
                    self._validate_table_schema(table_info, req.attribute_map)

                write_request_list_to_send.append(
                    (table_info, req)
                )

        future_result_list = []
        for i in range(0, len(write_request_list_to_send),
                       self._batch_chunk_size):
            req_list = (
                write_request_list_to_send[i:i + self._batch_chunk_size]
            )

            future_result_list.append(
                self._batch_write_async(context, req_list)
            )

        unprocessed_items = {}
        for future_result in future_result_list:
            unprocessed_request_list = future_result.result()
            for (table_info, write_request) in unprocessed_request_list:
                # Group requests the backend could not process by table name.
                unprocessed_items.setdefault(table_info.name, []).append(
                    write_request)

        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_BATCHWRITE_END,
            dict(
                write_request_map=write_request_map,
                unprocessed_items=unprocessed_items
            )
        )

        return unprocessed_items
Пример #15
0
 def callback(future):
     """Delete-item completion hook: emit DELETEITEM_END on success,
     otherwise DELETEITEM_ERROR with the exception as the event payload.
     """
     if not future.exception():
         notifier.notify(
             context, notifier.EVENT_TYPE_DATA_DELETEITEM_END,
             payload
         )
     else:
         notifier.notify(
             context, notifier.EVENT_TYPE_DATA_DELETEITEM_ERROR,
             future.exception(), priority=notifier.PRIORITY_ERROR
         )
Пример #16
0
 def callback(future):
     """Table-delete completion hook: on success drop the table info record
     and emit TABLE_DELETE_END; on failure mark the table DELETE_FAILED and
     emit TABLE_DELETE_ERROR with the exception as the event payload.
     """
     if not future.exception():
         self._table_info_repo.delete(context, table_info.name)
         notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_END,
                         table_info.name)
     else:
         table_info.status = models.TableMeta.TABLE_STATUS_DELETE_FAILED
         self._table_info_repo.update(context, table_info, ["status"])
         notifier.notify(context,
                         notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                         future.exception(),
                         priority=notifier.PRIORITY_ERROR)
Пример #17
0
    def delete_table(self, context, table_name):
        """Initiate deletion of a table.

        Idempotent with respect to concurrent deletes: a table already in
        DELETING state is reported as success. Any other non-ACTIVE state
        raises ResourceInUseException. Emits TABLE_DELETE_START and, per
        outcome, TABLE_DELETE_END or TABLE_DELETE_ERROR.

        :returns: TableMeta with the table's schema and (updated) status
        :raises TableNotExistsException: if the table is unknown
        :raises ResourceInUseException: if the table is in a transient state
        """
        notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_START,
                        table_name)
        try:
            table_info = self._table_info_repo.get(context,
                                                   table_name,
                                                   ['status'])
        except TableNotExistsException as e:
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                            e.message, priority=notifier.PRIORITY_ERROR)
            raise

        if table_info.status == TableMeta.TABLE_STATUS_DELETING:
            # table is already being deleted, just return immediately
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_END,
                            table_name)
            return TableMeta(table_info.schema, table_info.status)
        elif table_info.status != TableMeta.TABLE_STATUS_ACTIVE:
            e = ResourceInUseException()
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                            table_name + ' ' + e.message,
                            priority=notifier.PRIORITY_ERROR)
            raise e

        table_info.status = TableMeta.TABLE_STATUS_DELETING

        self._table_info_repo.update(context, table_info, ["status"])

        self._do_delete_table(context, table_info)

        return TableMeta(table_info.schema, table_info.status)
Пример #18
0
    def list_tables(self,
                    context,
                    exclusive_start_table_name=None,
                    limit=None):
        """Return the tenant's table names, with optional pagination.

        Emits a debug-priority TABLE_LIST notification carrying the paging
        parameters.
        """
        table_names = self._table_info_repo.get_tenant_table_names(
            context, exclusive_start_table_name, limit)
        event_payload = {
            'exclusive_start_table_name': exclusive_start_table_name,
            'limit': limit,
        }
        notifier.notify(context, notifier.EVENT_TYPE_TABLE_LIST,
                        event_payload, priority=notifier.PRIORITY_DEBUG)

        return table_names
Пример #19
0
    def delete_table(self, context, table_name):
        """Initiate deletion of a table.

        Idempotent with respect to concurrent deletes: a table already in
        DELETING state is reported as success. Any other non-ACTIVE state
        raises ResourceInUseException. Emits TABLE_DELETE_START and, per
        outcome, TABLE_DELETE_END or TABLE_DELETE_ERROR.

        :returns: TableMeta with the table's schema and (updated) status
        :raises TableNotExistsException: if the table is unknown
        :raises ResourceInUseException: if the table is in a transient state
        """
        notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_START,
                        table_name)
        try:
            table_info = self._table_info_repo.get(context, table_name,
                                                   ['status'])
        except TableNotExistsException as e:
            notifier.notify(context,
                            notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                            e.message,
                            priority=notifier.PRIORITY_ERROR)
            raise

        if table_info.status == TableMeta.TABLE_STATUS_DELETING:
            # table is already being deleted, just return immediately
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_END,
                            table_name)
            return TableMeta(table_info.schema, table_info.status)
        elif table_info.status != TableMeta.TABLE_STATUS_ACTIVE:
            e = ResourceInUseException()
            notifier.notify(context,
                            notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                            table_name + ' ' + e.message,
                            priority=notifier.PRIORITY_ERROR)
            raise e

        table_info.status = TableMeta.TABLE_STATUS_DELETING

        self._table_info_repo.update(context, table_info, ["status"])

        self._do_delete_table(context, table_info)

        return TableMeta(table_info.schema, table_info.status)
Пример #20
0
 def callback(future):
     """Table-create completion hook: on success record the backend name,
     mark the table ACTIVE and emit TABLE_CREATE_END; on failure mark it
     CREATE_FAILED and emit TABLE_CREATE_ERROR with the exception.
     """
     if not future.exception():
         table_info.status = models.TableMeta.TABLE_STATUS_ACTIVE
         # The future's result is the backend-internal table name.
         table_info.internal_name = future.result()
         self._table_info_repo.update(context, table_info,
                                      ["status", "internal_name"])
         notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_END,
                         table_info.schema)
     else:
         table_info.status = models.TableMeta.TABLE_STATUS_CREATE_FAILED
         self._table_info_repo.update(context, table_info, ["status"])
         notifier.notify(context,
                         notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                         future.exception(),
                         priority=notifier.PRIORITY_ERROR)
Пример #21
0
    def process_request(self, req):
        """Enforce the per-tenant request-rate limit.

        Rejects the request with RequestQuotaExceeded when less than
        1/rps_per_tenant seconds have passed since the tenant's previous
        request; otherwise records the request time and lets it through.

        :param req: incoming request; only used to derive the tenant id
        :raises exception.RequestQuotaExceeded: when the rate limit is hit
        """
        tenant_id = self._get_tenant_id(req)

        now = time.time()
        prev = self.last_time.get(tenant_id, 0)

        if self.rps_per_tenant and now - prev < 1. / self.rps_per_tenant:
            # Bug fix: LOG.debug interpolates %-style lazy arguments, so the
            # original '{}' placeholders were never substituted into the
            # message. Use %s placeholders instead.
            LOG.debug(
                'Request rate for tenant %s exceeded preconfigured'
                ' limit %s. Request rejected.', tenant_id, self.rps_per_tenant)
            notifier.notify({}, notifier.EVENT_TYPE_REQUEST_RATE_LIMITED,
                            tenant_id)
            raise exception.RequestQuotaExceeded()

        self.last_time[tenant_id] = now
Пример #22
0
    def list_tables(self, context, exclusive_start_table_name=None,
                    limit=None):
        """Return the tenant's table names, with optional pagination.

        Emits a debug-priority TABLE_LIST notification carrying the paging
        parameters.
        """
        tnames = self._table_info_repo.get_tenant_table_names(
            context, exclusive_start_table_name, limit
        )
        notifier.notify(
            context, notifier.EVENT_TYPE_TABLE_LIST,
            dict(
                exclusive_start_table_name=exclusive_start_table_name,
                limit=limit
            ),
            priority=notifier.PRIORITY_DEBUG
        )

        return tnames
Пример #23
0
    def create_table(self, context, table_name, table_schema):
        """Create a new table.

        Persists the table info in CREATING state, then performs the backend
        creation via ``_do_create_table``. Emits TABLE_CREATE_START and, on
        a name collision, TABLE_CREATE_ERROR before re-raising.

        :returns: TableMeta with the table's schema and status
        :raises TableAlreadyExistsException: if the name is already taken
        """
        notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_START,
                        table_schema)

        table_info = TableInfo(table_name, table_schema,
                               TableMeta.TABLE_STATUS_CREATING)
        try:
            self._table_info_repo.save(context, table_info)
        except TableAlreadyExistsException as e:
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                            e.message, priority=notifier.PRIORITY_ERROR)
            raise

        self._do_create_table(context, table_info)

        return TableMeta(table_info.schema, table_info.status)
Пример #24
0
 def callback(future):
     """Table-delete completion hook: on success drop the table info record
     and emit TABLE_DELETE_END; on failure mark the table DELETE_FAILED and
     emit TABLE_DELETE_ERROR with the exception as the event payload.
     """
     if not future.exception():
         self._table_info_repo.delete(
             context, table_info.name
         )
         notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_END,
                         table_info.name)
     else:
         table_info.status = models.TableMeta.TABLE_STATUS_DELETE_FAILED
         self._table_info_repo.update(
             context, table_info, ["status"]
         )
         notifier.notify(
             context, notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
             future.exception(), priority=notifier.PRIORITY_ERROR
         )
Пример #25
0
    def _do_create_table(self, context, table_info):
        """Create the table in the backend synchronously.

        On success records the backend-internal name, marks the table ACTIVE
        and emits TABLE_CREATE_END. On backend failure emits
        TABLE_CREATE_ERROR and re-raises.

        :raises BackendInteractionException: propagated from the driver
        """
        try:
            table_info.internal_name = self._storage_driver.create_table(
                context, table_info)
            table_info.status = TableMeta.TABLE_STATUS_ACTIVE
            self._table_info_repo.update(context, table_info,
                                         ["status", "internal_name"])
        except BackendInteractionException as ex:
            notifier.notify(context,
                            notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                            ex.message,
                            priority=notifier.PRIORITY_ERROR)
            raise

        notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_END,
                        table_info.schema)
Пример #26
0
    def _do_create_table(self, context, table_info):
        """Create the table in the backend synchronously.

        On success records the backend-internal name, marks the table ACTIVE
        and emits TABLE_CREATE_END. On backend failure emits
        TABLE_CREATE_ERROR and re-raises.

        :raises BackendInteractionException: propagated from the driver
        """
        try:
            table_info.internal_name = self._storage_driver.create_table(
                context, table_info
            )
            table_info.status = TableMeta.TABLE_STATUS_ACTIVE
            self._table_info_repo.update(
                context, table_info, ["status", "internal_name"]
            )
        except BackendInteractionException as ex:
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                            ex.message, priority=notifier.PRIORITY_ERROR)
            raise

        notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_END,
                        table_info.schema)
Пример #27
0
    def execute_write_batch(self, context, write_request_map):
        """Execute a batch of put/delete write requests.

        Validates every request against its table schema, dispatches the
        requests asynchronously in chunks of ``_batch_chunk_size``, and
        collects the requests the backend could not process, grouped by
        table name. Emits BATCHWRITE_START/END notifications.

        :param write_request_map: dict of table_name -> list of write requests
        :returns: dict of table_name -> list of unprocessed write requests
        """
        notifier.notify(context, notifier.EVENT_TYPE_DATA_BATCHWRITE_START,
                        write_request_map)
        write_request_list_to_send = []
        # items()/range() instead of the Python-2-only iteritems()/xrange()
        # keep the method working on both Python 2 and Python 3.
        for table_name, write_request_list in write_request_map.items():
            table_info = self._table_info_repo.get(context, table_name)
            for req in write_request_list:
                self._validate_table_is_active(table_info)

                # Puts must match the full schema; deletes only the keys.
                if req.is_put:
                    self._validate_table_schema(table_info,
                                                req.attribute_map,
                                                keys_only=False)
                else:
                    self._validate_table_schema(table_info, req.attribute_map)

                write_request_list_to_send.append((table_info, req))

        future_result_list = []
        for i in range(0, len(write_request_list_to_send),
                       self._batch_chunk_size):
            req_list = (write_request_list_to_send[i:i +
                                                   self._batch_chunk_size])

            future_result_list.append(
                self._batch_write_async(context, req_list))

        unprocessed_items = {}
        for future_result in future_result_list:
            unprocessed_request_list = future_result.result()
            for (table_info, write_request) in unprocessed_request_list:
                # Group requests the backend could not process by table name.
                unprocessed_items.setdefault(
                    table_info.name, []).append(write_request)

        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_BATCHWRITE_END,
            dict(write_request_map=write_request_map,
                 unprocessed_items=unprocessed_items))

        return unprocessed_items
Пример #28
0
 def callback(future):
     """Table-create completion hook: on success record the backend name,
     mark the table ACTIVE and emit TABLE_CREATE_END; on failure mark it
     CREATE_FAILED and emit TABLE_CREATE_ERROR with the exception.
     """
     if not future.exception():
         table_info.status = models.TableMeta.TABLE_STATUS_ACTIVE
         # The future's result is the backend-internal table name.
         table_info.internal_name = future.result()
         self._table_info_repo.update(
             context, table_info, ["status", "internal_name"]
         )
         notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_END,
                         table_info.schema)
     else:
         table_info.status = models.TableMeta.TABLE_STATUS_CREATE_FAILED
         self._table_info_repo.update(
             context, table_info, ["status"]
         )
         notifier.notify(
             context, notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
             future.exception(), priority=notifier.PRIORITY_ERROR
         )
Пример #29
0
    def create_table(self, context, table_name, table_schema):
        """Create a new table.

        Persists the table info in CREATING state, then performs the backend
        creation via ``_do_create_table``. Emits TABLE_CREATE_START and, on
        a name collision, TABLE_CREATE_ERROR before re-raising.

        :returns: TableMeta with the table's schema and status
        :raises TableAlreadyExistsException: if the name is already taken
        """
        notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_START,
                        table_schema)

        table_info = TableInfo(table_name, table_schema,
                               TableMeta.TABLE_STATUS_CREATING)
        try:
            self._table_info_repo.save(context, table_info)
        except TableAlreadyExistsException as e:
            notifier.notify(context,
                            notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                            e.message,
                            priority=notifier.PRIORITY_ERROR)
            raise

        self._do_create_table(context, table_info)

        return TableMeta(table_info.schema, table_info.status)
Пример #30
0
    def delete_item(self,
                    context,
                    table_name,
                    key_attribute_map,
                    expected_condition_map=None):
        """Delete a single item identified by its key attributes.

        Validates the table state and the key against the schema, performs
        the driver delete under the task semaphore, then emits a
        debug-priority DELETEITEM notification describing the request.

        :returns: whatever the storage driver's ``delete_item`` returns
        """
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)
        self._validate_table_schema(table_info, key_attribute_map)

        with self.__task_semaphore:
            result = self._storage_driver.delete_item(context, table_info,
                                                      key_attribute_map,
                                                      expected_condition_map)
        notifier.notify(context,
                        notifier.EVENT_TYPE_DATA_DELETEITEM,
                        dict(table_name=table_name,
                             key_attribute_map=key_attribute_map,
                             expected_condition_map=expected_condition_map),
                        priority=notifier.PRIORITY_DEBUG)

        return result
Пример #31
0
    def delete_item(self, context, table_name, key_attribute_map,
                    expected_condition_map=None):
        """Delete a single item identified by its key attributes.

        Validates the table state and the key against the schema, performs
        the driver delete under the task semaphore, then emits a
        debug-priority DELETEITEM notification describing the request.
        """
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)
        self._validate_table_schema(table_info, key_attribute_map)

        with self.__task_semaphore:
            deletion_result = self._storage_driver.delete_item(
                context, table_info, key_attribute_map, expected_condition_map)

        event_payload = {
            'table_name': table_name,
            'key_attribute_map': key_attribute_map,
            'expected_condition_map': expected_condition_map,
        }
        notifier.notify(context, notifier.EVENT_TYPE_DATA_DELETEITEM,
                        event_payload, priority=notifier.PRIORITY_DEBUG)

        return deletion_result
Пример #32
0
    def describe_table(self, context, table_name):
        """Return the table's metadata, expiring stuck schema operations.

        If the table has sat in CREATING/DELETING longer than
        ``_schema_operation_timeout`` seconds, its status is flipped to the
        corresponding *_FAILED state and an error notification is emitted.
        Emits a debug-priority TABLE_DESCRIBE notification.

        :returns: TableMeta with the table's schema and (possibly updated)
            status
        """
        table_info = self._table_info_repo.get(context, table_name,
                                               ['status', 'last_updated'])
        notifier.notify(context,
                        notifier.EVENT_TYPE_TABLE_DESCRIBE,
                        table_name,
                        priority=notifier.PRIORITY_DEBUG)

        timedelta = datetime.now() - table_info.last_updated

        if timedelta.total_seconds() > self._schema_operation_timeout:
            if table_info.status == TableMeta.TABLE_STATUS_CREATING:
                table_info.status = TableMeta.TABLE_STATUS_CREATE_FAILED
                self._table_info_repo.update(context, table_info, ['status'])
                LOG.debug("Table '{}' creation timed out."
                          " Setting status to {}".format(
                              table_info.name,
                              TableMeta.TABLE_STATUS_CREATE_FAILED))
                # NOTE(review): other notify calls here pass `context`
                # directly; confirm `context.to_dict()` is intended.
                notifier.notify(
                    context.to_dict(), notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                    dict(table_name=table_name, message='Operation timed out'))

            if table_info.status == TableMeta.TABLE_STATUS_DELETING:
                table_info.status = TableMeta.TABLE_STATUS_DELETE_FAILED
                self._table_info_repo.update(context, table_info, ['status'])
                LOG.debug("Table '{}' deletion timed out."
                          " Setting status to {}".format(
                              table_info.name,
                              TableMeta.TABLE_STATUS_DELETE_FAILED))
                notifier.notify(
                    context.to_dict(), notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                    dict(table_name=table_name, message='Operation timed out'))

        return TableMeta(table_info.schema, table_info.status)
Пример #33
0
    def describe_table(self, context, table_name):
        """Return the table's metadata, expiring stuck schema operations.

        If the table has sat in CREATING/DELETING longer than
        ``_schema_operation_timeout`` seconds, its status is flipped to the
        corresponding *_FAILED state and an error notification is emitted.
        Emits a debug-priority TABLE_DESCRIBE notification.

        :returns: TableMeta with the table's schema and (possibly updated)
            status
        """
        table_info = self._table_info_repo.get(
            context, table_name, ['status', 'last_updated'])
        notifier.notify(context, notifier.EVENT_TYPE_TABLE_DESCRIBE,
                        table_name, priority=notifier.PRIORITY_DEBUG)

        timedelta = datetime.now() - table_info.last_updated

        if timedelta.total_seconds() > self._schema_operation_timeout:
            if table_info.status == TableMeta.TABLE_STATUS_CREATING:
                table_info.status = TableMeta.TABLE_STATUS_CREATE_FAILED
                self._table_info_repo.update(context, table_info, ['status'])
                LOG.debug(
                    "Table '{}' creation timed out."
                    " Setting status to {}".format(
                        table_info.name, TableMeta.TABLE_STATUS_CREATE_FAILED)
                )
                # NOTE(review): other notify calls here pass `context`
                # directly; confirm `context.to_dict()` is intended.
                notifier.notify(
                    context.to_dict(),
                    notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                    dict(
                        table_name=table_name,
                        message='Operation timed out'
                    )
                )

            if table_info.status == TableMeta.TABLE_STATUS_DELETING:
                table_info.status = TableMeta.TABLE_STATUS_DELETE_FAILED
                self._table_info_repo.update(context, table_info, ['status'])
                LOG.debug(
                    "Table '{}' deletion timed out."
                    " Setting status to {}".format(
                        table_info.name, TableMeta.TABLE_STATUS_DELETE_FAILED)
                )
                notifier.notify(
                    context.to_dict(),
                    notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                    dict(
                        table_name=table_name,
                        message='Operation timed out'
                    )
                )

        return TableMeta(table_info.schema, table_info.status)
Пример #34
0
def bulk_load_app(environ, start_response):
    """WSGI application streaming newline-delimited items into a table.

    Each line of the request body is turned into an item with
    make_put_item() and inserted asynchronously via
    storage.put_item_async.  After the first insertion error the rest of
    the input is read but skipped.  Yields a JSON summary with read /
    processed / unprocessed / failed counters, the last item read and a
    map of failed items.
    """
    context = environ['webob.adhoc_attrs']['context']

    path = environ['PATH_INFO']

    LOG.debug('Request received: %s', path)

    # Raw string literal: "\w" in a plain literal is an invalid escape
    # sequence (a DeprecationWarning, and eventually an error, on modern
    # Pythons); the matched pattern is unchanged.
    if not re.match(r"^/v1/\w+/data/tables/\w+/bulk_load$", path):
        start_response('404 Not found', [('Content-Type', 'text/plain')])
        yield 'Incorrect url. Please check it and try again\n'
        notifier.notify(context,
                        notifier.EVENT_TYPE_STREAMING_PATH_ERROR,
                        path,
                        priority=notifier.PRIORITY_ERROR)
        return

    url_comp = path.split('/')
    project_id = url_comp[2]
    table_name = url_comp[5]

    LOG.debug('Tenant: %s, table name: %s', project_id, table_name)

    utils.check_project_id(context, project_id)

    notifier.notify(context, notifier.EVENT_TYPE_STREAMING_DATA_START, path)

    read_count = 0
    processed_count = 0
    unprocessed_count = 0
    failed_count = 0
    put_count = 0
    # Mutable cell so future callbacks can bump the shared counter.
    done_count = [0]
    last_read = None
    failed_items = {}

    dont_process = False

    future_ready_event = Event()
    future_ready_queue = Queue.Queue()

    stream = environ['wsgi.input']
    while True:
        chunk = stream.readline()

        if not chunk:
            break

        read_count += 1

        if dont_process:
            LOG.debug('Skipping item #%d', read_count)
            unprocessed_count += 1
            continue

        last_read = chunk

        try:
            future = storage.put_item_async(context, table_name,
                                            make_put_item(chunk))

            put_count += 1

            future.add_done_callback(
                make_callback(future_ready_queue, future_ready_event,
                              done_count, chunk))

            # try to get result of finished futures
            try:
                while True:
                    finished_future, chunk = future_ready_queue.get_nowait()
                    finished_future.result()
                    processed_count += 1
            except Queue.Empty:
                pass

        except Exception as e:
            failed_items[chunk] = repr(e)
            dont_process = True
            LOG.debug('Error inserting item: %s, message: %s', chunk, repr(e))

            # str(e) instead of e.message: BaseException.message was
            # deprecated in Python 2.6, is absent on many exception
            # classes, and was removed in Python 3 -- accessing it here
            # would raise AttributeError inside the error handler.
            notifier.notify(context, notifier.EVENT_TYPE_STREAMING_DATA_ERROR,
                            {
                                'path': path,
                                'item': chunk,
                                'error': str(e)
                            })

    LOG.debug('Request body has been read completely')

    # wait for all futures to be finished
    while done_count[0] < put_count:
        LOG.debug('Waiting for %d item(s) to be processed...',
                  put_count - done_count[0])
        future_ready_event.wait()
        future_ready_event.clear()

    LOG.debug('All items are processed. Getting results of item processing...')

    # get results of finished futures
    while done_count[0] > processed_count + failed_count:
        LOG.debug('Waiting for %d result(s)...',
                  done_count[0] - processed_count - failed_count)
        chunk = None
        try:
            finished_future, chunk = future_ready_queue.get_nowait()
            finished_future.result()
            processed_count += 1
        except Queue.Empty:
            break
        except Exception as e:
            failed_count += 1
            failed_items[chunk] = repr(e)
            LOG.debug('Error inserting item: %s, message: %s', chunk, repr(e))

            notifier.notify(context, notifier.EVENT_TYPE_STREAMING_DATA_ERROR,
                            {
                                'path': path,
                                'item': chunk,
                                'error': str(e)
                            })

    # Update count if error happened before put_item_async was invoked
    # NOTE(review): dont_process can also be set by a failed future result
    # drained in the inner loop above -- TODO confirm the failed counter
    # stays accurate in that path.
    if dont_process:
        failed_count += 1

    start_response('200 OK', [('Content-Type', 'application/json')])

    resp = {
        'read': read_count,
        'processed': processed_count,
        'unprocessed': unprocessed_count,
        'failed': failed_count,
        'last_item': last_read,
        'failed_items': failed_items
    }

    notifier.notify(context, notifier.EVENT_TYPE_STREAMING_DATA_END, {
        'path': path,
        'response': resp
    })

    yield json.dumps(resp)
Пример #35
0
def bulk_load_app(environ, start_response):
    """WSGI application streaming newline-delimited items into a table.

    Each line of the request body is turned into an item with
    make_put_item() and inserted asynchronously via
    storage.put_item_async.  After the first insertion error the rest of
    the input is read but skipped.  Yields a JSON summary with read /
    processed / unprocessed / failed counters, the last item read and a
    map of failed items.
    """
    context = environ['webob.adhoc_attrs']['context']

    path = environ['PATH_INFO']

    LOG.debug('Request received: %s', path)

    # Raw string literal: "\w" in a plain literal is an invalid escape
    # sequence (a DeprecationWarning, and eventually an error, on modern
    # Pythons); the matched pattern is unchanged.
    if not re.match(r"^/v1/\w+/data/tables/\w+/bulk_load$", path):
        start_response('404 Not found', [('Content-Type', 'text/plain')])
        yield 'Incorrect url. Please check it and try again\n'
        notifier.notify(context, notifier.EVENT_TYPE_STREAMING_PATH_ERROR,
                        path, priority=notifier.PRIORITY_ERROR)
        return

    url_comp = path.split('/')
    project_id = url_comp[2]
    table_name = url_comp[5]

    LOG.debug('Tenant: %s, table name: %s', project_id, table_name)

    utils.check_project_id(context, project_id)

    notifier.notify(context, notifier.EVENT_TYPE_STREAMING_DATA_START, path)

    read_count = 0
    processed_count = 0
    unprocessed_count = 0
    failed_count = 0
    put_count = 0
    # Mutable cell so future callbacks can bump the shared counter.
    done_count = [0]
    last_read = None
    failed_items = {}

    dont_process = False

    future_ready_event = Event()
    future_ready_queue = Queue.Queue()

    stream = environ['wsgi.input']
    while True:
        chunk = stream.readline()

        if not chunk:
            break

        read_count += 1

        if dont_process:
            LOG.debug('Skipping item #%d', read_count)
            unprocessed_count += 1
            continue

        last_read = chunk

        try:
            future = storage.put_item_async(
                context, table_name, make_put_item(chunk)
            )

            put_count += 1

            future.add_done_callback(make_callback(
                future_ready_queue,
                future_ready_event,
                done_count,
                chunk
            ))

            # try to get result of finished futures
            try:
                while True:
                    finished_future, chunk = future_ready_queue.get_nowait()
                    finished_future.result()
                    processed_count += 1
            except Queue.Empty:
                pass

        except Exception as e:
            failed_items[chunk] = repr(e)
            dont_process = True
            LOG.debug('Error inserting item: %s, message: %s',
                      chunk, repr(e))

            # str(e) instead of e.message: BaseException.message was
            # deprecated in Python 2.6, is absent on many exception
            # classes, and was removed in Python 3 -- accessing it here
            # would raise AttributeError inside the error handler.
            notifier.notify(context, notifier.EVENT_TYPE_STREAMING_DATA_ERROR,
                            {'path': path, 'item': chunk, 'error': str(e)})

    LOG.debug('Request body has been read completely')

    # wait for all futures to be finished
    while done_count[0] < put_count:
        LOG.debug('Waiting for %d item(s) to be processed...',
                  put_count - done_count[0])
        future_ready_event.wait()
        future_ready_event.clear()

    LOG.debug('All items are processed. Getting results of item processing...')

    # get results of finished futures
    while done_count[0] > processed_count + failed_count:
        LOG.debug('Waiting for %d result(s)...',
                  done_count[0] - processed_count - failed_count)
        chunk = None
        try:
            finished_future, chunk = future_ready_queue.get_nowait()
            finished_future.result()
            processed_count += 1
        except Queue.Empty:
            break
        except Exception as e:
            failed_count += 1
            failed_items[chunk] = repr(e)
            LOG.debug('Error inserting item: %s, message: %s',
                      chunk, repr(e))

            notifier.notify(context, notifier.EVENT_TYPE_STREAMING_DATA_ERROR,
                            {'path': path, 'item': chunk, 'error': str(e)})

    # Update count if error happened before put_item_async was invoked
    # NOTE(review): dont_process can also be set by a failed future result
    # drained in the inner loop above -- TODO confirm the failed counter
    # stays accurate in that path.
    if dont_process:
        failed_count += 1

    start_response('200 OK', [('Content-Type', 'application/json')])

    resp = {
        'read': read_count,
        'processed': processed_count,
        'unprocessed': unprocessed_count,
        'failed': failed_count,
        'last_item': last_read,
        'failed_items': failed_items
    }

    notifier.notify(context, notifier.EVENT_TYPE_STREAMING_DATA_END,
                    {'path': path, 'response': resp})

    yield json.dumps(resp)
Пример #36
0
    def select_item(self,
                    context,
                    table_name,
                    indexed_condition_map,
                    select_type,
                    index_name=None,
                    limit=None,
                    exclusive_start_key=None,
                    consistent=True,
                    order_type=None):
        """Query items matching the given indexed conditions.

        The condition map may reference only the table's hash key and the
        range key of the table (or of *index_name*, if given); attribute
        types must match the schema and the hash key condition must be a
        single equality.  The actual query is delegated to the storage
        driver and a debug notification is emitted afterwards.
        """
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)

        attr_type_map = table_info.schema.attribute_type_map
        hash_key_name = table_info.schema.hash_key_name
        range_key_name = table_info.schema.range_key_name

        if index_name is None:
            range_key_name_to_query = range_key_name
        else:
            index_def = table_info.schema.index_def_map.get(index_name)
            if index_def is None:
                raise ValidationError(_(
                    "Index '%(index_name)s' doesn't exist for table "
                    "'%(table_name)s'"),
                                      index_name=index_name,
                                      table_name=table_name)
            range_key_name_to_query = index_def.alt_range_key_attr

        if exclusive_start_key is not None:
            self._validate_table_schema(table_info,
                                        exclusive_start_key,
                                        index_name=index_name)

        remaining_conditions = indexed_condition_map.copy()
        hash_key_condition_list = remaining_conditions.pop(
            hash_key_name, None)
        range_key_to_query_condition_list = remaining_conditions.pop(
            range_key_name_to_query, None)

        def _args_typed(condition_list, expected_type):
            # True when every argument of every condition carries the
            # schema-declared attribute type.
            return all(arg.attr_type == expected_type
                       for condition in condition_list
                       for arg in condition.args)

        # Valid only when nothing but the key attributes is referenced and
        # all condition arguments match the schema types.
        schema_valid = bool(
            not remaining_conditions and
            hash_key_condition_list and
            _args_typed(hash_key_condition_list,
                        attr_type_map[hash_key_name]) and
            (not range_key_to_query_condition_list or
             _args_typed(range_key_to_query_condition_list,
                         attr_type_map[range_key_name_to_query]))
        )

        if not schema_valid:
            raise ValidationError(_(
                "Specified query conditions %(indexed_condition_map)s "
                "doesn't match table schema: %(table_schema)s"),
                                  indexed_condition_map=indexed_condition_map,
                                  table_schema=table_info.schema)

        if (len(hash_key_condition_list) != 1
                or hash_key_condition_list[0].type !=
                IndexedCondition.CONDITION_TYPE_EQUAL):
            raise ValidationError(
                _("Only equality condition is allowed for HASH key attribute "
                  "'%(hash_key_name)s'"),
                hash_key_name=hash_key_name,
            )

        with self.__task_semaphore:
            result = self._storage_driver.select_item(
                context, table_info, hash_key_condition_list,
                range_key_to_query_condition_list, select_type, index_name,
                limit, exclusive_start_key, consistent, order_type)
        notifier.notify(context,
                        notifier.EVENT_TYPE_DATA_SELECTITEM,
                        dict(table_name=table_name,
                             indexed_condition_map=indexed_condition_map,
                             select_type=select_type,
                             index_name=index_name,
                             limit=limit,
                             exclusive_start_key=exclusive_start_key,
                             consistent=consistent,
                             order_type=order_type),
                        priority=notifier.PRIORITY_DEBUG)

        return result
Пример #37
0
    def execute_get_batch(self, context, read_request_list):
        """Execute a batch of GetItem requests asynchronously.

        Every request is validated up-front, then all reads are launched
        and the method blocks until each one has completed.

        :returns: tuple (items, unprocessed_items) where items holds
            (table_name, result) pairs for successful reads and
            unprocessed_items collects the requests that failed.
        """
        assert read_request_list

        items = []
        unprocessed_items = []

        num_requests = len(read_request_list)
        # Mutable cell so the nested callbacks can update the shared
        # completion counter.
        finished = [0]

        done_event = Event()

        executors = []

        for req in read_request_list:
            def make_request_executor():
                # Freeze per-request state now; the closures run later,
                # when the loop variable already points elsewhere.
                _req = req

                _table_name = _req.table_name
                _key_attribute_map = _req.key_attribute_map

                _table_info = self._table_info_repo.get(context, _table_name)
                self._validate_table_is_active(_table_info)
                self._validate_table_schema(_table_info, _key_attribute_map)

                _attributes_to_get = _req.attributes_to_get

                def callback(res):
                    try:
                        items.append((_table_name, res.result()))
                    except Exception:
                        unprocessed_items.append(_req)
                        LOG.exception("Can't process GetItemRequest")
                    finished[0] += 1
                    if finished[0] >= num_requests:
                        done_event.set()

                def executor():
                    hash_key = _key_attribute_map.get(
                        _table_info.schema.hash_key_name)
                    range_key = _key_attribute_map.get(
                        _table_info.schema.range_key_name)
                    future_result = self._get_item_async(
                        context, _table_info, hash_key, range_key,
                        _attributes_to_get, consistent=_req.consistent
                    )
                    future_result.add_done_callback(callback)
                return executor
            executors.append(make_request_executor())

        notifier.notify(context, notifier.EVENT_TYPE_DATA_BATCHREAD_START,
                        read_request_list)

        for run_request in executors:
            run_request()

        done_event.wait()

        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_BATCHREAD_END,
            dict(
                read_request_list=read_request_list,
                unprocessed_items=unprocessed_items
            )
        )

        return items, unprocessed_items
Пример #38
0
    def select_item(self, context, table_name, indexed_condition_map,
                    select_type, index_name=None, limit=None,
                    exclusive_start_key=None, consistent=True,
                    order_type=None):
        """Query items matching the given indexed conditions.

        Conditions may reference only the table's hash key and the range
        key of the table (or of *index_name*, when given).  All condition
        argument types must match the schema, and the hash key condition
        must be a single equality.  Delegates the query to the storage
        driver and emits a debug notification.
        """
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)

        attr_type_map = table_info.schema.attribute_type_map
        hash_key_name = table_info.schema.hash_key_name
        range_key_name = table_info.schema.range_key_name

        if index_name is None:
            range_key_name_to_query = range_key_name
        else:
            index_def = table_info.schema.index_def_map.get(index_name)
            if index_def is None:
                raise ValidationError(
                    _("Index '%(index_name)s' doesn't exist for table "
                      "'%(table_name)s'"),
                    index_name=index_name, table_name=table_name)
            range_key_name_to_query = index_def.alt_range_key_attr

        if exclusive_start_key is not None:
            self._validate_table_schema(
                table_info, exclusive_start_key, index_name=index_name
            )

        leftover = indexed_condition_map.copy()
        hash_key_condition_list = leftover.pop(hash_key_name, None)
        range_key_to_query_condition_list = leftover.pop(
            range_key_name_to_query, None
        )

        def _types_ok(condition_list, expected_type):
            # Every argument of every condition must carry the
            # schema-declared attribute type.
            return all(arg.attr_type == expected_type
                       for condition in condition_list
                       for arg in condition.args)

        # Valid only when nothing but the key attributes is referenced and
        # all condition arguments match the schema types.
        schema_valid = bool(
            not leftover and
            hash_key_condition_list and
            _types_ok(hash_key_condition_list,
                      attr_type_map[hash_key_name]) and
            (not range_key_to_query_condition_list or
             _types_ok(range_key_to_query_condition_list,
                       attr_type_map[range_key_name_to_query]))
        )

        if not schema_valid:
            raise ValidationError(
                _("Specified query conditions %(indexed_condition_map)s "
                  "doesn't match table schema: %(table_schema)s"),
                indexed_condition_map=indexed_condition_map,
                table_schema=table_info.schema
            )

        if (len(hash_key_condition_list) != 1 or
                hash_key_condition_list[0].type !=
                IndexedCondition.CONDITION_TYPE_EQUAL):
            raise ValidationError(
                _("Only equality condition is allowed for HASH key attribute "
                  "'%(hash_key_name)s'"),
                hash_key_name=hash_key_name,
            )

        with self.__task_semaphore:
            result = self._storage_driver.select_item(
                context, table_info, hash_key_condition_list,
                range_key_to_query_condition_list, select_type,
                index_name, limit, exclusive_start_key, consistent, order_type
            )
        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_SELECTITEM,
            dict(
                table_name=table_name,
                indexed_condition_map=indexed_condition_map,
                select_type=select_type,
                index_name=index_name,
                limit=limit,
                exclusive_start_key=exclusive_start_key,
                consistent=consistent,
                order_type=order_type
            ),
            priority=notifier.PRIORITY_DEBUG
        )

        return result
Пример #39
0
    def execute_get_batch(self, context, read_request_list):
        """Run a batch of GetItem requests concurrently.

        Each request is validated first; all reads are then launched and
        the method blocks until every one has finished.

        :returns: tuple (items, unprocessed_items) where items holds
            (table_name, result) pairs and unprocessed_items the requests
            that failed.
        """
        assert read_request_list

        items = []
        unprocessed_items = []

        total = len(read_request_list)
        # List cell: lets the nested callbacks mutate the shared counter.
        completed = [0]

        done_event = Event()

        pending_executors = []

        for req in read_request_list:

            def build_executor():
                # Capture the current request; the inner closures run
                # later, when the loop variable points at the last one.
                _req = req

                _table_name = _req.table_name
                _key_attribute_map = _req.key_attribute_map

                _table_info = self._table_info_repo.get(context, _table_name)
                self._validate_table_is_active(_table_info)
                self._validate_table_schema(_table_info, _key_attribute_map)

                _attributes_to_get = _req.attributes_to_get

                def on_done(res):
                    try:
                        items.append((_table_name, res.result()))
                    except Exception:
                        unprocessed_items.append(_req)
                        LOG.exception("Can't process GetItemRequest")
                    completed[0] += 1
                    if completed[0] >= total:
                        done_event.set()

                def executor():
                    hash_key = _key_attribute_map.get(
                        _table_info.schema.hash_key_name)
                    range_key = _key_attribute_map.get(
                        _table_info.schema.range_key_name)
                    future_result = self._get_item_async(
                        context,
                        _table_info,
                        hash_key,
                        range_key,
                        _attributes_to_get,
                        consistent=_req.consistent)
                    future_result.add_done_callback(on_done)

                return executor

            pending_executors.append(build_executor())

        notifier.notify(context, notifier.EVENT_TYPE_DATA_BATCHREAD_START,
                        read_request_list)

        for execute in pending_executors:
            execute()

        done_event.wait()

        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_BATCHREAD_END,
            dict(read_request_list=read_request_list,
                 unprocessed_items=unprocessed_items))

        return items, unprocessed_items