def flush_at():
    """Submits to AT the complete set of signed documents that have
    not yet been submitted, then redirects back to the index page.

    Only documents whose class is present in ``util.AT_SUBMIT_TYPES``
    are considered valid for submission; a failure while submitting an
    individual document is logged and does not abort the operation.
    """

    # creates a values map structure to retrieve the complete
    # set of inbound documents that have not yet been submitted
    # to at for the flush operation
    kwargs = {
        "filter_string": "",
        "start_record": 0,
        "number_records": 1000,
        "sort": "issue_date:ascending",
        "filters[]": [
            "issue_date:greater:1356998400",
            "submitted_at:equals:2",
            "document_type:equals:3"
        ]
    }
    api = util.get_api()
    documents = api.list_signed_documents(**kwargs)

    # filters the result set retrieved so that only the valid at
    # "submittable" documents are present in the sequence
    valid_documents = [
        value for value in documents
        if value["_class"] in util.AT_SUBMIT_TYPES
    ]

    # "calculates" the total set of valid documents present in the
    # valid documents and starts the index counter
    total = len(valid_documents)
    index = 1

    # iterates over the complete set of valid documents to be sent
    # (submitted) to at and processes the submission, note that the
    # document class name is stored in a local that does not shadow
    # the type() builtin
    for document in valid_documents:
        _type = document["_class"]
        object_id = document["object_id"]
        representation = document["representation"]
        issue_date = document["issue_date"]
        issue_date_d = datetime.datetime.utcfromtimestamp(issue_date)
        issue_date_s = issue_date_d.strftime("%d %b %Y %H:%M:%S")

        # prints debug information about the current document
        # submission to at, including progress information
        quorum.info(
            "Submitting %s - %s (%s) [%d/%d]" %
            (_type, representation, issue_date_s, index, total)
        )

        try:
            # starts the submission process for the invoice, taking into
            # account that the document id to be submitted is the one that
            # has been extracted from the (signed) document structure
            api.submit_invoice_at(object_id)
        except Exception as exception:
            quorum.error(
                "Exception while submitting document - %s" %
                quorum.legacy.UNICODE(exception)
            )
        else:
            quorum.info("Document submitted with success")

        # increments the index counter, because one more document
        # has been processed (submitted or failed)
        index += 1

    return flask.redirect(
        flask.url_for("index", message="Signed documents have been sent to AT")
    )
def do_media_extras():
    """Processes an uploaded ZIP file of media (image) files, attaching
    each file to its associated root entity.

    Each archive entry must be named ``<object_id>[_<position>]`` or
    ``<company_product_code>[_<position>]`` with a ``.png``/``.jpg``/
    ``.jpeg`` extension; entries that cannot be resolved to an entity
    are skipped with an informational log message.
    """

    # retrieves the reference to the (omni) api object that
    # is going to be used for the operations of updating of
    # the merchandise in bulk (multiple operations at a time)
    api = util.get_api()

    # tries to retrieve the media file from the current
    # form in case it's not available renders the current
    # template with an error message
    media_file = quorum.get_field("media_file", None)
    if media_file is None or not media_file.filename:
        return flask.render_template(
            "extra/media.html.tpl",
            link = "extras",
            error = "No file defined"
        )

    # creates a temporary file path for the storage of the file
    # and then saves it into that directory, closing the same
    # file afterwards, as it has been properly saved
    fd, file_path = tempfile.mkstemp()
    try: media_file.save(file_path)
    finally: media_file.close()

    try:
        # creates a new temporary directory that is going to be used
        # in the extraction of the media zip file
        temp_path = tempfile.mkdtemp()
        try:
            # creates the zip file reference with the current file path
            # and then extracts the complete set of contents to the "target"
            # temporary path closing the zip file afterwards, the local name
            # avoids shadowing the zip() builtin
            zip_file = zipfile.ZipFile(file_path)
            try: zip_file.extractall(temp_path)
            finally: zip_file.close()

            # iterates over the complete set of names in the temporary path
            # to try to upload the media to the target data source, note that
            # only the media files are considered and the base name of them
            # are going to be validated for existence in the data source
            for name in os.listdir(temp_path):
                # splits the file name into base name and extension and validates
                # the extension, so that only media files are considered
                base, extension = os.path.splitext(name)
                if not extension.lower() in (".png", ".jpg", ".jpeg"):
                    quorum.info("Skipping, '%s' not a valid media file" % name)
                    continue

                # splits the base value of the file name so that it's possible to
                # extract the proper position of the image if that's required, the
                # numeric guard avoids an unhandled ValueError (which would abort
                # the complete operation) for names with a non numeric suffix
                base_s = base.rsplit("_", 1)
                if len(base_s) > 1 and base_s[1].isdigit():
                    position = int(base_s[1])
                else:
                    base_s = [base]
                    position = 1

                # tries to "cast" the base file name value as an integer and in case
                # it's possible assumes that this value is the object identifier
                try: object_id = int(base_s[0])
                except Exception: object_id = None

                # in case no object id was retrieved from the base file name value
                # a secondary strategy is used, so that the merchandise database
                # is searched using the base string value as the company product code
                if not object_id:
                    # creates the keyword arguments map so that the merchandise
                    # with the provided company product code is retrieved
                    kwargs = {
                        "start_record" : 0,
                        "number_records" : 1,
                        "filters[]" : [
                            "company_product_code:equals:%s" % base_s[0]
                        ]
                    }

                    # runs the list merchandise operation in order to try to find a
                    # merchandise entity for the requested (unique) product code in
                    # case there's at least one merchandise its object id is used
                    try: merchandise = api.list_merchandise(**kwargs)
                    except Exception: merchandise = []
                    if merchandise: object_id = merchandise[0]["object_id"]

                # in case no object id was retrieved must skip the current loop
                # with a proper information message (as expected)
                if not object_id:
                    quorum.info("Skipping, could not resolve object id for '%s'" % base)
                    continue

                # prints a logging message about the upload of media file that
                # is going to be performed for the current entity
                quorum.debug(
                    "Adding media file for entity '%d' in position '%d'" %\
                    (object_id, position)
                )

                # creates the target temporary media path from the temporary directory
                # path and then "reads" the complete set of contents from it closing the
                # file afterwards (no more reading allowed)
                media_path = os.path.join(temp_path, name)
                media_file = open(media_path, "rb")
                try: contents = media_file.read()
                finally: media_file.close()

                # tries to guess the proper image type for the image located at the
                # provided path and then uses this value to construct the mime type
                image_type = imghdr.what(media_path)
                mime_type = "image/" + image_type if image_type else "image/unknown"

                # sets/updates the media for the associated root entity using the
                # data extracted from the file and the information in its name
                api.set_media_entity(
                    object_id,
                    contents,
                    position = position,
                    mime_type = mime_type,
                    engine = "fs",
                    thumbnails = True
                )
        finally:
            # removes the temporary path as it's no longer going to be
            # required for the operation (errors are ignored)
            shutil.rmtree(temp_path, ignore_errors = True)
    finally:
        # closes the temporary file descriptor and removes the temporary
        # file (avoiding any resource leaks)
        os.close(fd)
        os.remove(file_path)

    # redirects the user back to the media list page with a success
    # message indicating that everything went as expected
    return flask.redirect(
        flask.url_for(
            "media_extras",
            message = "Media file processed with success"
        )
    )
def flush_at():
    """Submits to AT the complete set of signed documents that have
    not yet been submitted, then redirects back to the index page.

    Only documents whose class is present in ``util.AT_SUBMIT_TYPES``
    are considered valid for submission; a failure while submitting an
    individual document is logged and does not abort the operation.
    """

    # creates a values map structure to retrieve the complete
    # set of inbound documents that have not yet been submitted
    # to at for the flush operation
    kwargs = {
        "filter_string" : "",
        "start_record" : 0,
        "number_records" : 1000,
        "sort" : "issue_date:ascending",
        "filters[]" : [
            "issue_date:greater:1356998400",
            "submitted_at:equals:2",
            "document_type:equals:3"
        ]
    }
    api = util.get_api()
    documents = api.list_signed_documents(**kwargs)

    # filters the result set retrieved so that only the valid at
    # "submittable" documents are present in the sequence
    valid_documents = [
        value for value in documents
        if value["_class"] in util.AT_SUBMIT_TYPES
    ]

    # "calculates" the total set of valid documents present in the
    # valid documents and starts the index counter
    total = len(valid_documents)
    index = 1

    # iterates over the complete set of valid documents to be sent
    # (submitted) to at and processes the submission, note that the
    # document class name is stored in a local that does not shadow
    # the type() builtin
    for document in valid_documents:
        _type = document["_class"]
        object_id = document["object_id"]
        representation = document["representation"]
        issue_date = document["issue_date"]
        issue_date_d = datetime.datetime.utcfromtimestamp(issue_date)
        issue_date_s = issue_date_d.strftime("%d %b %Y %H:%M:%S")

        # prints debug information about the current document
        # submission to at, including progress information
        quorum.info(
            "Submitting %s - %s (%s) [%d/%d]" % (
                _type,
                representation,
                issue_date_s,
                index,
                total
            )
        )

        try:
            # starts the submission process for the invoice, taking into
            # account that the document id to be submitted is the one that
            # has been extracted from the (signed) document structure
            api.submit_invoice_at(object_id)
        except Exception as exception:
            quorum.error(
                "Exception while submitting document - %s" %
                quorum.legacy.UNICODE(exception)
            )
        else:
            quorum.info("Document submitted with success")

        # increments the index counter, because one more document
        # has been processed (submitted or failed)
        index += 1

    return flask.redirect(
        flask.url_for(
            "index",
            message = "Signed documents have been sent to AT"
        )
    )
def callback(line, header = None):
    """Imports/updates the metadata of a single entity described by the
    provided CSV ``line``, resolving the target entity either by object
    id or by company product code.

    Requires the enclosing scope to provide the ``custom`` flag and the
    ``api`` client object (closure variables).

    :param line: Sequence of (string) values for the current CSV row.
    :param header: Optional sequence of column names, required when
        running under the custom (dynamic columns) mode.
    """

    # in case the custom mode is enabled all of the columns are going
    # to be used dynamically for the update of the metadata map
    if custom:
        # creates a zip of tuples with the header to line value association
        # and uses them to build a proper dictionary
        zipped = zip(header, line)
        update = dict(zipped)

        # iterates over the complete set of items in the map of values
        # and updates the update map with the sanitized value
        for name, value in update.items(): update[name] = value.strip()

        # tries to retrieve the base identifier of the entity
        # this value is going to be used as the basis for identification
        base = update.pop("code", None)
        base = update.pop("company_product_code", base)
        base = update.pop("object_id", base)
        base = update.pop("base", base)

        # tries to retrieve some of the base entity values
        # if they're found they are properly popped out
        name = update.pop("name", None)
        description = update.pop("description", None)
    else:
        # unpacks the current "metadata" line into its components as
        # expected by the specification
        base,\
        name,\
        _retail_price,\
        characteristics,\
        material,\
        category,\
        collection,\
        brand,\
        description,\
        order = line[:10]

        # normalizes the various values that have been extracted from the line
        # so they are properly represented for importing
        characteristics = [value.strip() for value in characteristics.split(";") if value.strip()]
        material = [value.strip() for value in material.split(";") if value.strip()]
        category = [value.strip() for value in category.split(";") if value.strip()]
        collection = [value.strip() for value in collection.split(";") if value.strip()]
        brand = brand or None
        order = order or None

        # verifies and strips the various possible string values so that they
        # represent a valid not trailed value
        if name: name = name.strip()
        if brand: brand = brand.strip()
        if description: description = description.strip()
        if order: order = int(order.strip())

        # creates the update dictionary that is going to be used in the updating
        # of the "product" metadata
        update = dict(
            characteristics = characteristics,
            material = material,
            category = category,
            collection = collection,
            brand = brand,
            order = order
        )

    # tries to "cast" the base value as an integer and in case
    # it's possible assumes that this value is the object identifier,
    # note that the explicit Exception clause (instead of a bare except)
    # avoids swallowing system level exceptions (eg: KeyboardInterrupt)
    try: object_id = int(base)
    except Exception: object_id = None

    # in case no object id was retrieved from the base name value
    # a secondary strategy is used, so that the merchandise database
    # is searched using the base string value as the company product code
    if not object_id:
        # creates the keyword arguments map so that the merchandise
        # with the provided company product code is retrieved
        kwargs = {
            "start_record" : 0,
            "number_records" : 1,
            "filters[]" : [
                "company_product_code:equals:%s" % base
            ]
        }

        # runs the list merchandise operation in order to try to find a
        # merchandise entity for the requested (unique) product code in
        # case there's at least one merchandise its object id is used
        try: merchandise = api.list_merchandise(**kwargs)
        except Exception: merchandise = []
        if merchandise: object_id = merchandise[0]["object_id"]

    # in case no object id was retrieved must skip the current loop
    # with a proper information message (as expected)
    if not object_id:
        quorum.info("Skipping, could not resolve object id for '%s'" % base)
        return

    # prints a logging message about the updating of the metadata for
    # the entity with the current object id
    quorum.debug("Setting metadata for entity '%d'" % object_id)

    # retrieves the reference to the entity so that it's possible to
    # retrieve the currently defined metadata for it (to be updated)
    entity = api.get_entity(object_id)
    metadata = entity.get("metadata", {}) or {}

    # updates the metadata dictionary with the new values that are going
    # to be used for the updating of the entity, note that the previous
    # metadata values are leveraged and not overwritten with this strategy
    metadata.update(update)

    # creates the model structure to be updated and then runs the
    # proper execution of the metadata import
    model = dict(metadata = metadata)
    if name: model["name"] = name
    if description: model["description"] = description
    api.update_entity(object_id, payload = dict(root_entity = model))
def do_images_extras():
    """Processes an uploaded ZIP file of image files, updating the image
    of the merchandise whose company product code matches the base name
    of each archive entry.

    Entries without a ``.png``/``.jpg``/``.jpeg`` extension or whose
    base name matches no merchandise are skipped with an informational
    log message.
    """

    # retrieves the reference to the (omni) api object that
    # is going to be used for the operations of updating of
    # the merchandise in bulk (multiple operations at a time)
    api = util.get_api()

    # tries to retrieve the images file from the current
    # form in case it's not available renders the current
    # template with an error message
    images_file = quorum.get_field("images_file", None)
    if images_file is None or not images_file.filename:
        return flask.render_template(
            "extra/images.html.tpl",
            link = "extras",
            error = "No file defined"
        )

    # creates a temporary file path for the storage of the file
    # and then saves it into that directory, closing the same
    # file afterwards, as it has been properly saved
    fd, file_path = tempfile.mkstemp()
    try: images_file.save(file_path)
    finally: images_file.close()

    try:
        # creates a new temporary directory that is going to be used
        # in the extraction of the images zip file
        temp_path = tempfile.mkdtemp()
        try:
            # creates the zip file reference with the current file path
            # and then extracts the complete set of contents to the "target"
            # temporary path closing the zip file afterwards, the local name
            # avoids shadowing the zip() builtin
            zip_file = zipfile.ZipFile(file_path)
            try: zip_file.extractall(temp_path)
            finally: zip_file.close()

            # iterates over the complete set of names in the temporary path
            # to try to upload the image to the target data source, note that
            # only the image files are considered and the base name of them
            # are going to be validated for existence in the data source
            for name in os.listdir(temp_path):
                # splits the file name into base name and extension and validates
                # the extension, so that only image files are considered
                base, extension = os.path.splitext(name)
                if not extension.lower() in (".png", ".jpg", ".jpeg"):
                    quorum.info("Skipping, '%s' not a valid image file" % name)
                    continue

                # creates the keyword arguments map so that the merchandise
                # with the provided company product code is retrieved
                kwargs = {
                    "start_record" : 0,
                    "number_records" : 1,
                    "filters[]" : [
                        "company_product_code:equals:%s" % base
                    ]
                }

                # runs the get operation with the provided filter so that the
                # target merchandise is retrieved for object id validation
                merchandise = api.list_merchandise(**kwargs)

                # verifies that at least one entity was retrieved in case nothing
                # is found skips the current loop with a not found error
                if not merchandise:
                    quorum.info("Skipping, '%s' not found in data source" % base)
                    continue

                # prints a logging message about the upload of image file that
                # is going to be performed for the current merchandise
                quorum.debug("Changing image file for merchandise '%s'" % base)

                # retrieves the first entity from the resulting list and then retrieves
                # the object identifier from it to be used in the update operation
                entity = merchandise[0]
                object_id = entity["object_id"]

                # creates the target temporary image path from the temporary directory
                # path and then "reads" the complete set of contents from it closing the
                # file afterwards (no more reading allowed)
                image_path = os.path.join(temp_path, name)
                image_file = open(image_path, "rb")
                try: contents = image_file.read()
                finally: image_file.close()

                # creates the image (file) tuple with both the name of the file and the
                # contents of it (multipart standard)
                image_tuple = (name, contents)

                # creates the multipart data map with both the object id and the image
                # file parameters that are going to be used in the encoding
                data_m = {
                    "object_id" : object_id,
                    "transactional_merchandise[_parameters][image_file]" : image_tuple
                }

                # uses the "resolved" items structure in the operation to
                # the omni api so that the images for them get updated
                api.update_merchandise(object_id, data_m)
        finally:
            # removes the temporary path as it's no longer going to be
            # required for the operation (errors are ignored)
            shutil.rmtree(temp_path, ignore_errors = True)
    finally:
        # closes the temporary file descriptor and removes the temporary
        # file (avoiding any resource leaks)
        os.close(fd)
        os.remove(file_path)

    # redirects the user back to the images list page with a success
    # message indicating that everything went as expected
    return flask.redirect(
        flask.url_for(
            "images_extras",
            message = "Images file processed with success"
        )
    )
def do_media_extras():
    """Processes an uploaded ZIP file of media (image) files, attaching
    each file to its associated root entity.

    Each archive entry must be named ``<object_id>[_<position>]`` or
    ``<company_product_code>[_<position>]`` with a ``.png``/``.jpg``/
    ``.jpeg`` extension; entries that cannot be resolved to an entity
    are skipped with an informational log message.
    """

    # retrieves the reference to the (Omni) API object that
    # is going to be used for the operations of updating of
    # the merchandise in bulk (multiple operations at a time)
    api = util.get_api()

    # tries to retrieve the media file from the current
    # form in case it's not available renders the current
    # template with an error message
    media_file = quorum.get_field("media_file", None)
    if media_file is None or not media_file.filename:
        return flask.render_template(
            "extra/media.html.tpl",
            link = "extras",
            error = "No file defined"
        )

    # creates a temporary file path for the storage of the file
    # and then saves it into that directory, closing the same
    # file afterwards, as it has been properly saved
    fd, file_path = tempfile.mkstemp()
    try: media_file.save(file_path)
    finally: media_file.close()

    try:
        # creates a new temporary directory that is going to be used
        # in the extraction of the media zip file
        temp_path = tempfile.mkdtemp()
        try:
            # creates the zip file reference with the current file path
            # and then extracts the complete set of contents to the "target"
            # temporary path closing the zip file afterwards, the local name
            # avoids shadowing the zip() builtin
            zip_file = zipfile.ZipFile(file_path)
            try: zip_file.extractall(temp_path)
            finally: zip_file.close()

            # iterates over the complete set of names in the temporary path
            # to try to upload the media to the target data source, note that
            # only the media files are considered and the base name of them
            # are going to be validated for existence in the data source
            for name in os.listdir(temp_path):
                # splits the file name into base name and extension and validates
                # the extension, so that only media files are considered
                base, extension = os.path.splitext(name)
                if not extension.lower() in (".png", ".jpg", ".jpeg"):
                    quorum.info("Skipping, '%s' not a valid media file" % name)
                    continue

                # splits the base value of the file name so that it's possible to
                # extract the proper position of the image if that's required
                base_s = base.rsplit("_", 1)
                if len(base_s) > 1: position = int(base_s[1])
                else: position = 1

                # tries to "cast" the base file name value as an integer and in case
                # it's possible assumes that this value is the object identifier
                try: object_id = int(base_s[0])
                except Exception: object_id = None

                # in case no object id was retrieved from the base file name value
                # a secondary strategy is used, so that the merchandise database
                # is searched using the base string value as the company product code
                if not object_id:
                    # creates the keyword arguments map so that the merchandise
                    # with the provided company product code is retrieved
                    kwargs = {
                        "start_record" : 0,
                        "number_records" : 1,
                        "filters[]" : [
                            "company_product_code:equals:%s" % base_s[0]
                        ]
                    }

                    # runs the list merchandise operation in order to try to find a
                    # merchandise entity for the requested (unique) product code in
                    # case there's at least one merchandise its object id is used
                    try: merchandise = api.list_merchandise(**kwargs)
                    except Exception: merchandise = []
                    if merchandise: object_id = merchandise[0]["object_id"]

                # in case no object id was retrieved must skip the current loop
                # with a proper information message (as expected)
                if not object_id:
                    quorum.info("Skipping, could not resolve Object ID for '%s'" % base)
                    continue

                # prints a logging message about the upload of media file that
                # is going to be performed for the current entity
                quorum.debug(
                    "Adding media file for entity '%d' in position '%d'" %\
                    (object_id, position)
                )

                # creates the target temporary media path from the temporary directory
                # path and then "reads" the complete set of contents from it closing the
                # file afterwards (no more reading allowed)
                media_path = os.path.join(temp_path, name)
                media_file = open(media_path, "rb")
                try: contents = media_file.read()
                finally: media_file.close()

                # tries to guess the proper image type for the image located at the
                # provided path and then uses this value to construct the mime type
                image_type = imghdr.what(media_path)
                mime_type = "image/" + image_type if image_type else "image/unknown"

                # sets/updates the media for the associated root entity using the
                # data extracted from the file and the information in its name
                api.set_media_entity(
                    object_id,
                    contents,
                    position = position,
                    mime_type = mime_type,
                    engine = "fs",
                    thumbnails = True
                )
        finally:
            # removes the temporary path as it's no longer going to be
            # required for the operation (errors are ignored)
            shutil.rmtree(temp_path, ignore_errors = True)
    finally:
        # closes the temporary file descriptor and removes the temporary
        # file (avoiding any resource leaks)
        os.close(fd)
        os.remove(file_path)

    # redirects the user back to the media list page with a success
    # message indicating that everything went as expected
    return flask.redirect(
        flask.url_for(
            "media_extras",
            message = "Media file processed with success"
        )
    )
def callback(line, header = None):
    """Imports/updates the metadata of a single entity described by the
    provided CSV ``line``, resolving the target entity either by object
    id or by company product code.

    Depends on the enclosing scope for the ``custom`` flag and the
    ``api`` client object (closure variables).

    :param line: Sequence of (string) values for the current CSV row.
    :param header: Optional sequence of column names, required when
        running under the custom (dynamic columns) mode.
    """

    # in case the custom metadata mode is enabled then a special work
    # model is set where all of the columns are going to be used dynamically
    # for the update of the metadata map of the object
    if custom:
        # creates a zip of tuples with the header to line value association
        # and uses them to build a proper dictionary
        zipped = zip(header, line)
        update = dict(zipped)

        # iterates over the complete set of items in the map of values
        # and updates the update map with the sanitized value
        for name, value in update.items():
            update[name] = value.strip()

        # tries to retrieve the base identifier of the entity
        # this value is going to be used as the basis for identification
        base = update.pop("code", None)
        base = update.pop("company_product_code", base)
        base = update.pop("object_id", base)
        base = update.pop("base", base)

        # tries to retrieve some of the base entity values
        # if they're found they are properly popped out
        name = update.pop("name", None)
        description = update.pop("description", None)
        upc = update.pop("upc", None)
        ean = update.pop("ean", None)

    # otherwise this is a "normal" update and the "typical" metadata
    # fields are the one to be updated
    else:
        # unpacks the current "metadata" line into its components as
        # expected by the specification
        base,\
        name,\
        _retail_price,\
        compare_price,\
        discount,\
        characteristics,\
        material,\
        category,\
        collection,\
        brand,\
        season,\
        gender,\
        description,\
        order,\
        discountable,\
        sku_field,\
        upc,\
        ean = line[:18]

        # verifies if the initials part of the CSV line exists and
        # if that's the case processes it properly (all three initials
        # columns must be present, otherwise empty values are assumed)
        if len(line) > 20:
            initials, initials_min, initials_max = line[18:21]
        else:
            initials, initials_min, initials_max = "", "", ""

        # normalizes the various values that have been extracted from the line
        # so they are properly represented for importing (empty strings become
        # None and the semicolon separated sequences become stripped lists)
        name = name or None
        compare_price = (compare_price and compare_price.strip()) or None
        discount = (discount and discount.strip()) or None
        characteristics = [value.strip() for value in characteristics.split(";") if value.strip()]
        material = [value.strip() for value in material.split(";") if value.strip()]
        category = [value.strip() for value in category.split(";") if value.strip()]
        collection = [value.strip() for value in collection.split(";") if value.strip()]
        brand = brand or None
        season = season or None
        gender = gender or None
        description = description or None
        order = (order and order.strip()) or None
        discountable = discountable or None
        sku_field = sku_field or None
        upc = upc or None
        ean = ean or None
        initials = initials or None
        initials_min = initials_min or None
        initials_max = initials_max or None

        # verifies and strips the various possible string values so that they
        # represent a valid not trailed value, coercing numeric and boolean
        # ("1" valued) columns into their proper types
        if name: name = name.strip()
        if compare_price: compare_price = float(compare_price)
        if brand: brand = brand.strip()
        if season: season = season.strip()
        if gender: gender = gender.strip()
        if description: description = description.strip()
        if order: order = int(order)
        if discountable: discountable = discountable == "1"
        if sku_field: sku_field = sku_field.strip()
        if discount: discount = float(discount)
        if upc: upc = upc.strip()
        if ean: ean = ean.strip()
        if initials: initials = initials == "1"
        if initials_min: initials_min = int(initials_min)
        if initials_max: initials_max = int(initials_max)

        # creates the map that is going to hold the complete set of features
        # and populates the features according to their existence
        features = dict()
        if initials: features["initials"] = dict(min = initials_min, max = initials_max)

        # creates the update dictionary that is going to be used in the updating
        # of the "product" metadata (this is considered to be a delta dictionary)
        update = dict(
            compare_price = compare_price,
            discount = discount,
            characteristics = characteristics,
            features = features,
            material = material,
            category = category,
            collection = collection,
            brand = brand,
            season = season,
            gender = gender,
            order = order,
            discountable = discountable,
            sku_field = sku_field
        )

    # tries to "cast" the base value as an integer and in case
    # it's possible assumes that this value is the object identifier
    try: object_id = int(base)
    except Exception: object_id = None

    # in case no object id was retrieved from the base name value
    # a secondary strategy is used, so that the merchandise database
    # is searched using the base string value as the company product code
    if not object_id:
        # creates the keyword arguments map so that the merchandise
        # with the provided company product code is retrieved
        kwargs = {
            "start_record" : 0,
            "number_records" : 1,
            "filters[]" : [
                "company_product_code:equals:%s" % base
            ]
        }

        # runs the list merchandise operation in order to try to find a
        # merchandise entity for the requested (unique) product code in
        # case there's at least one merchandise its object id is used
        try: merchandise = api.list_merchandise(**kwargs)
        except Exception: merchandise = []
        if merchandise: object_id = merchandise[0]["object_id"]

    # in case no object id was retrieved must skip the current loop
    # with a proper information message (as expected)
    if not object_id:
        quorum.info("Skipping, could not resolve Object ID for '%s'" % base)
        return

    # prints a logging message about the updating of the metadata for
    # the entity with the current object id
    quorum.debug("Setting metadata for entity '%d'" % object_id)

    # retrieves the reference to the entity so that it's possible to
    # retrieve the currently defined metadata for it (to be updated)
    entity = api.get_entity(object_id)
    metadata = entity.get("metadata", {}) or {}

    # updates the metadata dictionary with the new values that are going
    # to be used for the updating of the entity, note that the previous
    # metadata values are leveraged and not overwritten with this strategy
    metadata.update(update)

    # creates the model structure to be updated and then runs the
    # proper execution of the metadata import
    model = dict(metadata = metadata)
    if name: model["name"] = name
    if description: model["description"] = description
    if upc: model["upc"] = upc
    if ean: model["ean"] = ean
    api.update_entity(object_id, payload = dict(root_entity = model))
def do_images_extras():
    """Processes an uploaded ZIP file of image files, updating the image
    of the merchandise whose company product code matches the base name
    of each archive entry.

    Entries without a ``.png``/``.jpg``/``.jpeg`` extension or whose
    base name matches no merchandise are skipped with an informational
    log message.
    """

    # retrieves the reference to the (Omni) API object that
    # is going to be used for the operations of updating of
    # the merchandise in bulk (multiple operations at a time)
    api = util.get_api()

    # tries to retrieve the images file from the current
    # form in case it's not available renders the current
    # template with an error message
    images_file = quorum.get_field("images_file", None)
    if images_file is None or not images_file.filename:
        return flask.render_template(
            "extra/images.html.tpl",
            link = "extras",
            error = "No file defined"
        )

    # creates a temporary file path for the storage of the file
    # and then saves it into that directory, closing the same
    # file afterwards, as it has been properly saved
    fd, file_path = tempfile.mkstemp()
    try: images_file.save(file_path)
    finally: images_file.close()

    try:
        # creates a new temporary directory that is going to be used
        # in the extraction of the images zip file
        temp_path = tempfile.mkdtemp()
        try:
            # creates the zip file reference with the current file path
            # and then extracts the complete set of contents to the "target"
            # temporary path closing the zip file afterwards, the local name
            # avoids shadowing the zip() builtin
            zip_file = zipfile.ZipFile(file_path)
            try: zip_file.extractall(temp_path)
            finally: zip_file.close()

            # iterates over the complete set of names in the temporary path
            # to try to upload the image to the target data source, note that
            # only the image files are considered and the base name of them
            # are going to be validated for existence in the data source
            for name in os.listdir(temp_path):
                # splits the file name into base name and extension and validates
                # the extension, so that only image files are considered
                base, extension = os.path.splitext(name)
                if not extension.lower() in (".png", ".jpg", ".jpeg"):
                    quorum.info("Skipping, '%s' not a valid image file" % name)
                    continue

                # creates the keyword arguments map so that the merchandise
                # with the provided company product code is retrieved
                kwargs = {
                    "start_record" : 0,
                    "number_records" : 1,
                    "filters[]" : [
                        "company_product_code:equals:%s" % base
                    ]
                }

                # runs the get operation with the provided filter so that the
                # target merchandise is retrieved for object id validation
                merchandise = api.list_merchandise(**kwargs)

                # verifies that at least one entity was retrieved in case nothing
                # is found skips the current loop with a not found error
                if not merchandise:
                    quorum.info("Skipping, '%s' not found in data source" % base)
                    continue

                # prints a logging message about the upload of image file that
                # is going to be performed for the current merchandise
                quorum.debug("Changing image file for merchandise '%s'" % base)

                # retrieves the first entity from the resulting list and then retrieves
                # the object identifier from it to be used in the update operation
                entity = merchandise[0]
                object_id = entity["object_id"]

                # creates the target temporary image path from the temporary directory
                # path and then "reads" the complete set of contents from it closing the
                # file afterwards (no more reading allowed)
                image_path = os.path.join(temp_path, name)
                image_file = open(image_path, "rb")
                try: contents = image_file.read()
                finally: image_file.close()

                # creates the image (file) tuple with both the name of the file and the
                # contents of it (multipart standard)
                image_tuple = (name, contents)

                # creates the multipart data map with both the object id and the image
                # file parameters that are going to be used in the encoding
                data_m = {
                    "object_id" : object_id,
                    "transactional_merchandise[_parameters][image_file]" : image_tuple
                }

                # uses the "resolved" items structure in the operation to
                # the Omni API so that the images for them get updated
                api.update_merchandise(object_id, data_m)
        finally:
            # removes the temporary path as it's no longer going to be
            # required for the operation (errors are ignored)
            shutil.rmtree(temp_path, ignore_errors = True)
    finally:
        # closes the temporary file descriptor and removes the temporary
        # file (avoiding any resource leaks)
        os.close(fd)
        os.remove(file_path)

    # redirects the user back to the images list page with a success
    # message indicating that everything went as expected
    return flask.redirect(
        flask.url_for(
            "images_extras",
            message = "Images file processed with success"
        )
    )