def _assertOutput(self, num_samples=None):
    with closing(S3Connection()) as s3:
        bucket = Bucket(s3, self.output_dir.netloc)
        prefix = self.output_dir.path[1:]
        for i in range(1 if num_samples is None else num_samples):
            output_file = self._sample_name(None if num_samples is None else i) + '.tar.gz'
            key = bucket.get_key(posixpath.join(prefix, output_file), validate=True)
            # FIXME: We may want to validate the output a bit more
            self.assertTrue(key.size > 0)
def _assertOutput(self, num_samples=None, bam=False):
    with closing(S3Connection()) as s3:
        bucket = Bucket(s3, self.output_dir.netloc)
        prefix = self.output_dir.path[1:]
        for i in range(1 if num_samples is None else num_samples):
            value = None if num_samples is None else i
            output_file = self._sample_name(value, bam=bam) + '.tar.gz'
            key = bucket.get_key(posixpath.join(prefix, output_file), validate=True)
            # FIXME: We may want to validate the output a bit more
            self.assertTrue(key.size > 0)
def _assertOutput(self, num_samples=None):
    with closing(S3Connection()) as s3:
        bucket = Bucket(s3, self.output_dir.netloc)
        prefix = self.output_dir.path[1:]
        for i in range(1 if num_samples is None else num_samples):
            output_file = self._sample_name(None if num_samples is None else i) + '.tar.gz'
            # This 'FAIL.' flag is added by bamQC
            output_file = 'FAIL.' + output_file
            key = bucket.get_key(posixpath.join(prefix, output_file), validate=True)
            # FIXME: We may want to validate the output a bit more
            self.assertTrue(key.size > 0)
def process_file(aws_conn, filepath):
    mtime = get_mtime(filepath)
    name_200 = add_size_name(filepath, '200')
    name_800 = add_size_name(filepath, '800')
    mtime_200 = get_mtime(name_200)
    mtime_800 = get_mtime(name_800)
    im = None
    # Regenerate the 200px thumbnail if it is missing or older than the original.
    if mtime_200 is None or mtime_200 < mtime:
        try:
            im = Image.open(filepath)
        except Exception:
            return None
        generate_200(im, name_200)
    # Likewise for the 800px display copy, reusing the opened image if we have one.
    if mtime_800 is None or mtime_800 < mtime:
        if im is None:
            try:
                im = Image.open(filepath)
            except Exception:
                return None
        generate_800(im, name_800)
    names = {
        'original': filepath,
        'thumbnail': name_200,
        'display': name_800,
    }
    b = Bucket(aws_conn, BUCKET)
    image_result = {}
    for image_type, name in names.items():
        aws_tag_path = add_size_name(name, 's3t') + '.meta'
        aws_key_path = name[len(GALLERY_DIR):].strip('/')
        image_result[image_type] = {
            'url': 'http://s3.amazonaws.com/{}/{}'.format(BUCKET, aws_key_path)
        }
        # If the local file is not newer than its tag file, the cached resolution
        # and the S3 copy are still current; skip the upload.
        if not is_newer(name, aws_tag_path):
            try:
                resolution = load_data(aws_tag_path)
                resolution['width']  # sanity check that the cached data is usable
            except Exception:
                resolution = get_resolution(name)
                save_data(aws_tag_path, resolution)
            image_result[image_type].update(resolution)
            continue
        resolution = get_resolution(name)
        image_result[image_type].update(resolution)
        save_data(aws_tag_path, resolution)
        s3key = b.get_key(aws_key_path)
        mtime = get_mtime(name)
        # If the copy on S3 is already newer than the local file, just touch the
        # tag file so the next run skips it.
        if s3key and s3key.last_modified:
            print(datetime.datetime(*parsedate(s3key.last_modified)[:6]))
            print(mtime)
            if datetime.datetime(*parsedate(s3key.last_modified)[:6]) > mtime:
                with open(aws_tag_path, 'a'):
                    os.utime(aws_tag_path, None)
                continue
        print('Sending {} to S3'.format(name))
        k = Key(b)
        k.key = aws_key_path
        expires = datetime.datetime.utcnow() + datetime.timedelta(days=25 * 365)
        expires = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
        k.set_metadata("Content-Type", mimetypes.guess_type(name)[0])
        k.set_metadata("Expires", expires)
        k.set_metadata("Cache-Control", "max-age={0}, public".format(86400 * 365 * 25))
        k.set_contents_from_filename(name)
        k.set_acl('public-read')
        with open(aws_tag_path, 'a'):
            os.utime(aws_tag_path, None)
    photo_age = get_photo_age(filepath)
    image_result['caption'] = get_caption(filepath)
    return photo_age, image_result
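# A minimal sketch of the module-level setup these snippets assume; the imports
# below cover the external names used above (boto's S3 classes, PIL, and stdlib
# helpers). `parsedate` is assumed to come from email.utils. Project helpers such
# as get_mtime, add_size_name, generate_200, generate_800, is_newer, load_data,
# save_data, get_resolution, get_photo_age, get_caption and the BUCKET /
# GALLERY_DIR constants are defined elsewhere in the repo and not reproduced here.
import datetime
import mimetypes
import os
import posixpath
from contextlib import closing
from email.utils import parsedate

from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from PIL import Image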