def testCopyBlobs(self):
    """Verify that zope.copy produces blob copies whose data matches the originals."""
    from zope.copy import copy

    # Named 'blob_file' rather than 'file' to avoid shadowing the builtin.
    blob_file = NamedBlobFile()
    blob_file.data = u'hello, world'
    image = NamedBlobImage()
    image.data = 'some image bytes'
    # Commit so the blob data is written to storage before copying.
    transaction.commit()

    file_copy = copy(blob_file)
    self.assertEqual(file_copy.data, blob_file.data)
    image_copy = copy(image)
    self.assertEqual(image_copy.data, image.data)
def testCopyBlobs(self):
    """Blob-aware copying: copies of blob file/image objects keep their data."""
    from zope.copy import copy

    blob_file = NamedBlobFile()
    blob_file.data = u'hello, world'
    blob_image = NamedBlobImage()
    blob_image.data = 'some image bytes'
    # Write the blobs out before copying them.
    transaction.commit()

    # Copy each blob in turn and compare against its original.
    for original in (blob_file, blob_image):
        duplicate = copy(original)
        self.assertEqual(duplicate.data, original.data)
def create_lead_image(size=(800, 450), color="blue"):
    """
    Create an in-memory NamedBlobImage containing a generated PNG.

    The image is a solid background with nine randomly placed thick
    lines drawn in lighter/darker shades of the base color.

    :param size: tuple of ints (width, height), default (800, 450)
    :param color: color name string or a PIL (r, g, b) tuple
    :return: NamedBlobImage
    """
    # Create the base image.
    im = Image.new("RGB", size, color=color)

    # Draw some lines
    draw = ImageDraw.Draw(im)

    # ImageColor.getrgb() only understands color *strings*; an (r, g, b)
    # tuple is already usable as-is. (Previously a tuple argument, which
    # the docstring allows, crashed here.)
    if not isinstance(color, tuple):
        color = ImageColor.getrgb(color)

    for i in range(9):
        # Random brightness factor between 0.8 and 1.7.
        factor = choice(range(8, 18, 1)) / 10.0
        stroke_color = (
            int(min(color[0] * factor, 255)),
            int(min(color[1] * factor, 255)),
            int(min(color[2] * factor, 255)),
        )
        draw.line(
            [
                (choice(range(0, size[0])), choice(range(0, size[1]))),
                (choice(range(0, size[0])), choice(range(0, size[1]))),
            ],
            fill=stroke_color,
            width=int(size[1] / 5),
        )

    # 'Save' the PNG into an in-memory buffer.
    sio = cStringIO.StringIO()
    im.save(sio, format="PNG")
    sio.seek(0)

    # Create named blob image
    nbi = NamedBlobImage()
    nbi.data = sio.read()
    nbi.filename = u"example.png"
    return nbi
def create_lead_image(size=(800, 450), color="blue"):
    """
    Build an in-memory NamedBlobImage: a solid background with nine
    randomly placed thick lines in shades of the base color.

    :param size: tuple of ints (width, height), default (800, 450)
    :param color: String or PIL color (r,g,b) tuple.
    :return: NamedBlobImage
    """
    width, height = size

    # Base canvas plus a drawing handle.
    canvas = Image.new("RGB", size, color=color)
    pen = ImageDraw.Draw(canvas)
    base_rgb = ImageColor.getrgb(color)

    for _ in range(9):
        # Brightness factor in [0.8, 1.7] applied to each channel,
        # clamped to the valid 0-255 range.
        shade = choice(range(8, 18, 1)) / 10.0
        stroke = tuple(
            int(min(base_rgb[idx] * shade, 255)) for idx in range(3)
        )
        start = (choice(range(0, width)), choice(range(0, height)))
        end = (choice(range(0, width)), choice(range(0, height)))
        pen.line([start, end], fill=stroke, width=int(height / 5))

    # Render the PNG into an in-memory buffer.
    buf = cStringIO.StringIO()
    canvas.save(buf, format="PNG")
    buf.seek(0)

    # Wrap the bytes in a named blob image.
    result = NamedBlobImage()
    result.data = buf.read()
    result.filename = u"example.png"
    return result
def getDefaultForFieldType(self, field):
    """Return a dummy/placeholder value appropriate for ``field``'s type.

    :param field: a zope.schema field instance
    :return: a sample value matching the field type, ``None`` for
             Method fields, or ``self.placeholder`` as the fallback.
    """
    # default string
    default_value = self.placeholder

    # Field class name drives the defaults lookup at the bottom.
    field_klass = field.__class__.__name__

    # Handle different value_type attributes of the field.
    value_type = getattr(field, 'value_type', None)

    if value_type:

        # If we're a data grid field, build one row of recursive defaults.
        if isinstance(value_type, DictRow):
            kwargs = {}
            s = getattr(value_type, 'schema', None)
            if s:
                for (_name, _field) in getAllSchemaFieldsAndDescriptions(s):
                    # Skip Method fields. Bug fix: test the iterated
                    # `_field` — the original tested the outer `field`,
                    # which is invariant inside this loop.
                    if not isinstance(_field, Method):
                        kwargs[_name] = self.getDefaultForFieldType(_field)
            return [kwargs, ]

        elif isinstance(value_type, (schema.TextLine, )):
            pass

        elif isinstance(value_type, (schema.Choice, )):
            # Resolve the vocabulary, looking it up by name if needed.
            vocabulary_name = getattr(value_type, 'vocabularyName', None)
            vocabulary = getattr(value_type, 'vocabulary', None)

            if vocabulary_name:
                vocabulary_factory = getUtility(IVocabularyFactory,
                                                vocabulary_name)
                vocabulary = vocabulary_factory(self.context)

            if vocabulary:
                if isinstance(vocabulary, SimpleVocabulary):
                    # Use the first vocabulary value, if any. list(...)
                    # replaces the previous bare `except:` around
                    # `keys()[0]` and is also Python 3 safe (dict_keys
                    # is not indexable there).
                    terms = list(vocabulary.by_value.keys())
                    if terms:
                        default_value = terms[0]
                else:
                    pass

    # Return nothing for methods
    if field_klass in ['Method', ]:
        return None

    # Define dummy rich text / file / image values.
    rich_text = RichTextValue(
        raw='<p>%s</p>' % self.placeholder,
        mimeType=u'text/html',
        outputMimeType='text/x-html-safe')

    named_blob_file = NamedBlobFile(
        filename=u'sample.pdf',
        contentType='application/pdf')
    named_blob_file.data = self.placeholder.encode('utf-8')

    named_blob_image = NamedBlobImage(
        filename=u'sample.png',
        contentType='image/png')
    named_blob_image.data = self.placeholder.encode('utf-8')

    # Simple field defaults
    defaults = {
        'Int': 10,
        'Text': "\n".join(3 * [self.placeholder]),
        'List': [default_value, ],
        'Tuple': (default_value, ),
        'TextLine': default_value,
        'Bool': True,
        'Datetime': datetime.now(),
        'RichText': rich_text,
        'NamedBlobFile': named_blob_file,
        'NamedBlobImage': named_blob_image,
        'Choice': default_value,
    }

    # If a default, return that. Otherwise, return the placeholder.
    return defaults.get(field_klass, self.placeholder)
def onVideoSave(context, event, force=False):
    """Populate missing video metadata from the YouTube API on save.

    Fills in the lead image, duration, channel, aspect ratio and
    effective date when absent, then reindexes the object if anything
    changed.

    :param context: the video content object
    :param event: the lifecycle event that triggered this handler (unused)
    :param force: when True, refresh every field even if already set
    """
    # Tracks whether anything changed, so we reindex at most once.
    updated = False

    # Check which fields are already populated.
    has_leadimage = bool(ILeadImageMarker(context).has_leadimage)
    has_duration = bool(VideoDataAdapter(context).getDuration())
    has_channel = bool(VideoDataAdapter(context).getVideoChannel())
    has_aspect_ratio = bool(VideoDataAdapter(context).getVideoAspectRatio())
    has_effective_date = bool(context.effective_date)

    # Only hit the API when forced or when some field is missing.
    if force or not has_leadimage or not has_duration \
            or not has_channel or not has_aspect_ratio \
            or not has_effective_date:

        # Get the API data
        data = getDataForYouTubeVideo(context)

        # Lead Image (if force or not exists)
        if force or not has_leadimage:

            # Get the image data via the YouTube API (binary data).
            image_data = getImageForYouTubeVideo(data)

            # Set the lead image if we retrieved it
            if image_data:
                # Bug fix: the filename used to be passed as the first
                # positional argument, which NamedBlobImage treats as
                # the *data* argument — so the filename was never stored
                # and that bogus data was immediately overwritten.
                filename = u'%s-leadimage' % context.getId()
                field = NamedBlobImage(data=image_data, filename=filename)
                context.leadimage = field
                updated = True

        # Duration (if force or not exists)
        if force or not has_duration:
            # Integer duration from the API data, if present.
            duration = data.get('duration', None)
            if duration:
                VideoDataAdapter(context).setDuration(duration)
                updated = True

        # Channel (if force or not exists)
        if force or not has_channel:
            # String channel id from the API data, if present.
            channel_id = data.get('channel_id', None)
            if channel_id:
                VideoDataAdapter(context).setVideoChannel(channel_id)
                updated = True

        # Aspect Ratio (if force or not exists)
        if force or not has_aspect_ratio:
            # String aspect ratio from the API data, if present.
            aspect_ratio = data.get('aspect_ratio', None)
            if aspect_ratio:
                VideoDataAdapter(context).setVideoAspectRatio(aspect_ratio)
                updated = True

        # Published date (best effort: a missing key or unparsable date
        # must never break the save).
        if force or not has_effective_date:
            try:
                if data['effective']:
                    effective_date = DateTime(data['effective'])
                    context.setEffectiveDate(effective_date)
                    updated = True
            except Exception:
                # Narrowed from a bare `except:` so SystemExit /
                # KeyboardInterrupt are no longer swallowed.
                pass

    # If we updated a value, reindex the object
    if updated:
        context.reindexObject()
def importContent(self):
    """Import/update person objects from the directory feed.

    Creates new people, updates existing ones whose feed entry is newer
    (or all of them when a ``force`` request parameter is given), sets
    expiration dates, lead images and bios, then deactivates expired
    people.

    :return: JSON dump (string) of the data for every touched person
    """
    # Grab the "force" parameter from the URL, and convert to boolean.
    # If this is true, everyone will be updated.
    force_update = bool(self.request.get('force', False))

    # Initialize the return value list
    rv = []

    # Get a list of the people
    people = self.getPeople()

    # Count total people, and initialize counter
    total_people = len(people)
    counter = 0

    # Log progress
    self.log("Downloaded data, %d entries" % total_people)

    # Get listing of valid classifications from current directory
    valid_classifications = [
        x.value for x in ClassificationsVocabulary()(self.context)
    ]

    # Add other classifications that are OK to import
    valid_classifications.extend([
        'Adjunct and Affiliate Faculty',
        'Emeritus and Retired Faculty',
        'Emeritus Professors and Retirees',
        'Adjunct & Retired Faculty',
        'Emeritus Faculty',
        'Emeriti and Retired',
        'Assistant to the Director',
        'Retired and Emeritus Faculty',
        'District Directors',
    ])

    # Iterate through contents
    for i in people:

        # Increment counter
        counter += 1

        # Set 'product_type' as 'type'
        i['product_type'] = i.get('type', None)

        # If we were passed a URL, don't do a classification check
        if self.person_url:
            pass

        # If the person is not an employee with a valid classification,
        # skip this record.
        elif not set(i.get('directory_classifications', [])) & \
                set(valid_classifications):
            continue

        # Create content importer from person JSON data
        v = self.content_importer(i)

        # Check for person existing in directory (import_path) and
        # create or update appropriately
        if v.data.get_id in self.import_path.objectIds():

            # Update
            item = self.import_path[v.data.get_id]

            # Only update if feed last updated date is greater than the
            # item last updated date. If a "force" parameter is provided
            # in the URL, all objects will be updated.
            if force_update or DateTime(v.data.modified) > item.modified():
                item = self.updateObject(item, v)
                self.log("Updated %s" % v.data.get_id)
            else:
                self.log("Skipped %s" % v.data.get_id)
                continue
        else:
            # Create
            item = self.createObject(self.import_path, v)
            self.log("Created %s" % v.data.get_id)

        # Set expiration date
        if v.data.expires:
            expires = DateTime(v.data.expires)
            item.setExpirationDate(expires)

        # Set Image. Close the HTTP response explicitly — previously the
        # urlopen() handle was never closed, leaking the connection.
        image_url = i.get('image_url', None)
        if image_url:
            response = urllib2.urlopen(image_url)
            try:
                image_data = response.read()
            finally:
                response.close()
            img = NamedBlobImage()
            img.data = image_data
            item.leadimage = img

        # Set text field for bio. Note this is not a RichTextValue, since
        # it's a field inherited from another schema
        if v.data.html:
            item.bio = v.data.html

        # Finalize item
        self.finalize(item)

        # Append the data structure converted from the JSON data to rv
        rv.append(json.loads(self.getJSON(item)))

        # Log progress
        self.log("Done with %s, %d/%d" % (v.data.get_id, counter,
                                          total_people))

    # Deactivate people who are expired and active
    self.deactivateExpiredPeople()

    # Return a full JSON dump of the updated data
    return json.dumps(rv, indent=4, sort_keys=True)