def measure(value):
  if value.proportion is None:
    pause = 0.5
    for i in xrange(4):
      try:
        # Retry the urlfetch with exponential backoff; see
        # http://stackoverflow.com/q/14944317/376238
        fetched_image = yield ctx.urlfetch('%s=s100' % value.serving_url)
        break
      except Exception:
        if i == 3:
          raise  # Out of retries; propagate the last fetch error.
        time.sleep(pause)
        pause = pause * 2
    image = images.Image(image_data=fetched_image.content)
    value.proportion = float(image.width) / float(image.height)
  raise orm.Return(True)
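# A minimal sketch of the retry pattern used in measure() above, extracted
# into a reusable helper. The decorator name (orm.tasklet) and the
# module-level ctx are assumptions carried over from the surrounding excerpt,
# not confirmed API.
@orm.tasklet
def fetch_with_backoff(url, attempts=4, pause=0.5):
  for attempt in xrange(attempts):
    try:
      result = yield ctx.urlfetch(url)
      break
    except Exception:
      if attempt == attempts - 1:
        raise  # Out of attempts; propagate the last error.
      time.sleep(pause)  # Blocking sleep, as in measure() above.
      pause *= 2  # Exponential backoff between attempts.
  raise orm.Return(result)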
def process_image(value, i, values):
  config = self._process_config
  new_value = value
  gs_object_name = new_value.gs_object_name
  new_gs_object_name = new_value.gs_object_name
  if config.get('copy'):
    new_value = copy.deepcopy(value)
    new_gs_object_name = '%s_%s' % (new_value.gs_object_name, config.get('copy_name'))
  blob_key = None
  # @note No try block is implemented here, so this code is no longer forgiving.
  # If any image fails to process, everything is lost/reverted, because one or more images:
  # - no longer exist in cloudstorage / .read() fails;
  # - are not valid images / not-an-image exception;
  # - failed to resize / resize could not be done;
  # - failed to create a gs key / blobstore failed for some reason;
  # - failed to create a serving url via get_serving_url / serving url service failed for some reason;
  # - failed to write to cloudstorage / cloudstorage failed for some reason.
  readonly_blob = cloudstorage.open(gs_object_name[3:], 'r')
  blob = readonly_blob.read()
  readonly_blob.close()
  image = images.Image(image_data=blob)
  if config.get('transform'):
    image.resize(config.get('width'),
                 config.get('height'),
                 crop_to_fit=config.get('crop_to_fit', False),
                 crop_offset_x=config.get('crop_offset_x', 0.0),
                 crop_offset_y=config.get('crop_offset_y', 0.0))
    blob = yield image.execute_transforms_async(output_encoding=image.format)
    new_value.proportion = float(image.width) / float(image.height)
    new_value.size = len(blob)
  writable_blob = cloudstorage.open(new_gs_object_name[3:], 'w', content_type=new_value.content_type)
  writable_blob.write(blob)
  writable_blob.close()
  if gs_object_name != new_gs_object_name:
    new_value.gs_object_name = new_gs_object_name
    blob_key = yield blobstore.create_gs_key_async(new_gs_object_name)
    new_value.image = blobstore.BlobKey(blob_key)
    new_value.serving_url = None
  values[i] = new_value
  raise orm.Return(True)
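# A hypothetical _process_config for the processor above; the keys are the
# ones process_image() reads via config.get(), but the values are purely
# illustrative and not taken from the real codebase.
self._process_config = {
    'copy': True,           # work on a deep copy instead of mutating in place
    'copy_name': 'small',   # suffix appended to the copied gs object name
    'transform': True,      # run the resize/crop transform
    'width': 100,
    'height': 100,
    'crop_to_fit': True,
    'crop_offset_x': 0.0,
    'crop_offset_y': 0.0,
}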
def async(entity):
  gs_object_name = entity.gs_object_name
  try:
    gs_object_name = entity.parse_duplicate_appendix(gs_object_name)
  except IndexError:
    pass
  new_gs_object_name = '%s_duplicate_%s' % (gs_object_name, entity.duplicate_appendix)
  # [3:] strips the '/gs' prefix; the cloudstorage library expects '/bucket/object' paths.
  readonly_blob = cloudstorage.open(gs_object_name[3:], 'r')
  writable_blob = cloudstorage.open(new_gs_object_name[3:], 'w', content_type=entity.content_type)
  # Memory-efficient streaming write, only needed because we copy by brute force.
  # There is no copy feature in the cloudstorage SDK, so we have to implement our own!
  while True:
    blob_segment = readonly_blob.read(1000000)  # Read 1 MB per iteration; that should be enough.
    if not blob_segment:
      break
    writable_blob.write(blob_segment)
  readonly_blob.close()
  writable_blob.close()
  entity.gs_object_name = new_gs_object_name
  blob_key = yield blobstore.create_gs_key_async(new_gs_object_name)
  entity.image = blobstore.BlobKey(blob_key)
  entity.serving_url = yield images.get_serving_url_async(entity.image)
  self._property.save_blobs_on_success(entity.image)
  raise orm.Return(entity)
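# A standalone sketch of the chunked copy above, assuming the
# GoogleAppEngineCloudStorageClient package. src and dst are '/bucket/object'
# paths (i.e. with the '/gs' prefix already stripped); chunk_size is a tuning
# knob, not an SDK requirement.
import cloudstorage

def gcs_copy(src, dst, content_type, chunk_size=1000000):
  reader = cloudstorage.open(src, 'r')
  writer = cloudstorage.open(dst, 'w', content_type=content_type)
  try:
    while True:
      chunk = reader.read(chunk_size)
      if not chunk:  # An empty string signals end of file.
        break
      writer.write(chunk)
  finally:
    reader.close()
    writer.close()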
def mapper(values):
  for i, v in enumerate(values):
    yield process_image(v, i, values)
  raise orm.Return(True)
def mapper(values):
  yield map(measure, values)
  raise orm.Return(True)
def mapper(values):
  yield map(generate, values)
  raise orm.Return(True)
def generate(value):
  if value.serving_url is None:
    value.serving_url = yield images.get_serving_url_async(value.image)
  raise orm.Return(True)
def mapper(entities):
  out = yield map(async, entities)
  raise orm.Return(out)
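# A minimal sketch of the parallel-wait semantics the mappers above rely on,
# assuming orm exposes ndb-style tasklets: yielding a list of futures (here
# produced by map()) suspends until every future resolves and evaluates to
# the list of their results. double() and double_all() are purely
# illustrative.
@orm.tasklet
def double(x):
  raise orm.Return(x * 2)

@orm.tasklet
def double_all(xs):
  out = yield map(double, xs)  # wait on all futures concurrently
  raise orm.Return(out)

assert double_all([1, 2, 3]).get_result() == [2, 4, 6]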