Example #1
def copy_blobs(args=None):
    if args is None:
        args = sys.argv[1:]
    [blob_dir] = args

    bucket_name, folder = os.environ['S3_FOLDER'].split('/', 1)

    conn = boto.s3.connection.S3Connection()
    bucket = conn.get_bucket(bucket_name)
    prefix = blob_dir
    if not prefix.endswith('/'):
        prefix += '/'
    lprefix = len(prefix)

    key = boto.s3.key.Key(bucket)

    logfile = open("/tmp/copy_blobs_to_s3-%s-%s-%s.log"
                   % (bucket_name, folder, time.time()),
                   'w')

    for dirpath, dirs, files in os.walk(blob_dir):
        for n in files:
            if not n.endswith('.blob'):
                continue
            p = os.path.join(dirpath, n)
            oid = ''.join(hexmatch(seg).group(1)
                          for seg in dirpath[lprefix:].split('/')
                          )
            serial = n[2:-5]
            key.key = "%s/%s/%s" % (folder, oid, serial)
            t = time.time()
            key.set_contents_from_filename(p)
            sz = os.stat(p).st_size
            print >>logfile, int((time.time()-t)*1000000), sz
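
The loop above relies on a hexmatch helper that is not shown in this excerpt. A minimal sketch of what it might look like, assuming each directory segment under the blob directory is a 0x-prefixed hex string (this definition is an assumption, not taken from the source):

import re

# Hypothetical: capture the hex digits of a segment such as "0x1f" so the
# joined groups form the oid used in the S3 key.
hexmatch = re.compile(r'0x([0-9a-f]+)').match
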
Example #2
File: s3.py Project: xsmart/nokkhum
    def __push_to_s3(self, processor_id, file_list, prefix_dir):

        try:
            processor_bucket = self.connection.get_bucket(processor_id)
        except Exception as e:
            logger.exception(e)
            
            try:
                self.connection.create_bucket(processor_id)
                processor_bucket = self.connection.get_bucket(processor_id)
            except Exception as e:
                logger.exception(e)
                return

        prefix_dir = prefix_dir + '/' + processor_id + '/'
        prefix_length = len(prefix_dir)

        for file_name in file_list:
            key = boto.s3.key.Key(processor_bucket)
#            print file_name[prefix_length:]
            key.key = file_name[prefix_length:]
            key.set_contents_from_filename(file_name)
            logger.debug("push %s to bucket %s key: %s complete" %
                         (file_name, processor_bucket, key.key))
            os.remove(file_name)
            # sleep to give other threads a chance to run
            time.sleep(self.sleep_time)
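
The broad except Exception above treats every failure as a missing bucket; a narrower sketch, assuming boto's get_bucket raises S3ResponseError with status 404 when the bucket does not exist:

        # Sketch only: create the bucket on a 404, re-raise anything else.
        try:
            processor_bucket = self.connection.get_bucket(processor_id)
        except boto.exception.S3ResponseError as e:
            if e.status != 404:
                raise
            processor_bucket = self.connection.create_bucket(processor_id)
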
Example #3
def s3_upload(bucket_name, local_filename, s3_filename):
    conn = boto.connect_s3()
    bucket = boto.s3.bucket.Bucket(conn, bucket_name)
    key = boto.s3.key.Key(bucket)
    key.key = s3_filename
    key.set_contents_from_filename(local_filename)
    key.make_public()
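
A hypothetical call (bucket and file names invented for illustration):

s3_upload('my-example-bucket', '/tmp/report.csv', 'reports/2015/report.csv')
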
Example #4
File: __init__.py Project: Kazzer/s3pi
def upload_to_s3(
        directory,
        settings,
        modified_files,
        region='us-east-1',
        log=logging.getLogger(__name__),
):
    """Uploads the local directory to the S3 Package Index"""
    s3_conn = None
    try:
        s3_conn = boto.s3.connect_to_region(region)
    except (
            boto.exception.NoAuthHandlerFound,
    ) as error:
        log.critical(error)
    else:
        s3_prefix = ensure_ends_with_slash(settings.get('s3.prefix'))

        s3_bucket = s3_conn.get_bucket(settings.get('s3.bucket'))

        for modified_file in modified_files:
            key = boto.s3.key.Key(
                bucket=s3_bucket,
                name=(s3_prefix + modified_file[len(directory)+1:]),
            )
            log.info(
                'Uploading "%s" to "%s" in "%s"',
                modified_file,
                key.name,
                key.bucket.name,
            )
            key.set_contents_from_filename(
                modified_file,
            )
            key.set_acl('public-read')
Example #5
def export_upload_file(monitor_id):
    monitor = models.FileMonitor.objects.get(pk=monitor_id)
    full = monitor.full_path()
    monitor.status = "Checking"
    monitor.save()
    if not os.path.exists(full):
        logger.error("OS Error in file uploader")
        monitor.status = "Error: file does not exist"
        monitor.save()
        return

    digest_hex, digest_b64, size = md5_stats_file(full)
    monitor.size = size
    monitor.md5sum = digest_hex
    monitor.url = "{0}:{1}".format(monitor.name, monitor.md5sum)

    monitor.status = "Connecting"
    monitor.save()

    try:
        con = boto.connect_s3(settings.AWS_ACCESS_KEY, settings.AWS_SECRET_KEY)
        bucket = con.get_bucket(settings.AWS_BUCKET_NAME)
        key = bucket.get_key(monitor.url)
        if key is not None:
            monitor.status = "Complete"
            monitor.save()
            return
        key = bucket.new_key(monitor.url)
    except Exception as err:
        logger.exception("Connecting error")
        monitor.status = "Error: {0}".format(err)
        monitor.save()
        return

    monitor.status = "Uploading"
    monitor.save()

    # Rewrite this into a class or a callable object
    # last_time = time.time()
    def get_progress(current, total):
        # now = time.time()
        # if now - last_time >= 0.5:
        monitor.progress = current
        monitor.save()
        # last_time = now

    try:
        key.set_contents_from_filename(full,
                                       cb=get_progress,
                                       num_cb=1000,
                                       md5=(digest_hex, digest_b64))
    except Exception as err:
        logger.exception("Uploading error")
        monitor.status = "Error: Uploading {0}".format(err)[:60]
        monitor.save()
        return

    monitor.progress = monitor.size
    monitor.status = "Complete"
    monitor.save()
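
md5_stats_file is not shown in this excerpt; a minimal sketch of an implementation, assuming it returns the hex digest, the base64-encoded digest (the pair boto's md5= argument expects), and the file size:

import base64
import hashlib
import os

def md5_stats_file(path, chunk_size=8192):
    # Hypothetical helper matching the call above.
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return (md5.hexdigest(),
            base64.b64encode(md5.digest()).decode('ascii'),
            os.path.getsize(path))
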
Example #6
def add_thumb(config, local_filename, remote_filename, extension):
    bucket = _get_s3_bucket(config)

    key = boto.s3.key.Key(bucket)
    key.key = remote_filename
    key.set_contents_from_filename(local_filename)
    key.set_metadata('Content-Type', "image/" + extension)
    key.set_acl('public-read')
Example #7
def add_thumb(config, local_filename, remote_filename, extension):
    bucket = _get_s3_bucket(config)

    key = boto.s3.key.Key(bucket)
    key.key = remote_filename
    key.set_contents_from_filename(local_filename)
    key.set_metadata('Content-Type', "image/" + extension)
    key.set_acl('public-read')
Example #8
def export_upload_file(monitor_id):
    monitor = models.FileMonitor.objects.get(pk=monitor_id)
    full = monitor.full_path()
    monitor.status = "Checking"
    monitor.save()
    if not os.path.exists(full):
        logger.error("OS Error in file uploader")
        monitor.status = "Error: file does not exist"
        monitor.save()
        return

    digest_hex, digest_b64, size = md5_stats_file(full)
    monitor.size = size
    monitor.md5sum = digest_hex
    monitor.url = "{0}:{1}".format(monitor.name, monitor.md5sum)

    monitor.status = "Connecting"
    monitor.save()

    try:
        con = boto.connect_s3(settings.AWS_ACCESS_KEY, settings.AWS_SECRET_KEY)
        bucket = con.get_bucket(settings.AWS_BUCKET_NAME)
        key = bucket.get_key(monitor.url)
        if key is not None:
            monitor.status = "Complete"
            monitor.save()
            return
        key = bucket.new_key(monitor.url)
    except Exception as err:
        logger.exception("Connecting error")
        monitor.status = "Error: {0}".format(err)
        monitor.save()
        return

    monitor.status = "Uploading"
    monitor.save()

    # Rewrite this into a class or a callable object
    #last_time = time.time()
    def get_progress(current, total):
        #now = time.time()
        #if now - last_time >= 0.5:
        monitor.progress = current
        monitor.save()
            #last_time = now

    try:
        key.set_contents_from_filename(full, cb=get_progress, num_cb=1000,
            md5=(digest_hex, digest_b64))
    except Exception as err:
        logger.exception("Uploading error")
        monitor.status = "Error: Uploading {0}".format(err)[:60]
        monitor.save()
        return

    monitor.progress = monitor.size
    monitor.status = "Complete"
    monitor.save()
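
The commented-out throttling and the TODO above suggest a callable object; a minimal sketch, assuming the same monitor model and a half-second save interval (names are hypothetical):

import time

class ProgressRecorder(object):
    """Hypothetical replacement for get_progress that only saves periodically."""

    def __init__(self, monitor, interval=0.5):
        self.monitor = monitor
        self.interval = interval
        self.last_time = 0.0

    def __call__(self, current, total):
        now = time.time()
        if now - self.last_time >= self.interval:
            self.monitor.progress = current
            self.monitor.save()
            self.last_time = now
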
Example #9
File: stage.py Project: atsansone/mut
    def upload_path(self, src: str, dest: str, **options) -> boto.s3.key.Key:
        key = boto.s3.key.Key(self.get_connection())
        key.key = dest

        if self.dry_run:
            return key

        key.set_contents_from_filename(src, **options)
        return key
Example #10
    def upload_path(self, src, dest, **options):
        key = boto.s3.key.Key(self.get_connection())
        key.key = dest

        if self.dry_run:
            return key

        key.set_contents_from_filename(src, **options)
        return key
Example #11
def upload_release(target_file, s3_key):
    conn = boto.s3.connection.S3Connection(
        env.aws_access_key, env.aws_secret_key, proxy=env.http_proxy, proxy_port=env.http_proxy_port
    )
    bucket = conn.get_bucket(env.s3_bucket)
    key = boto.s3.key.Key(bucket)
    key.key = s3_key
    key.set_contents_from_filename(target_file)
    key.make_public()
    print("Uploaded release as public to http://s3.amazonaws.com/%s/%s" % (bucket.name, s3_key))
Example #12
def upload_release(target_file, s3_key):
    conn = boto.s3.connection.S3Connection(env.aws_access_key,
            env.aws_secret_key, proxy=env.http_proxy,
            proxy_port=env.http_proxy_port)
    bucket = conn.get_bucket(env.s3_bucket)
    key = boto.s3.key.Key(bucket)
    key.key = s3_key
    key.set_contents_from_filename(target_file)
    key.make_public()
    print ("Uploaded release as public to http://s3.amazonaws.com/%s/%s" %
            (bucket.name, s3_key))
Example #13
def upload_gearbox_app(upload_release_file):
    name = parser.get('release', 'name')
    version = parser.get('release', 'version')
    s3_conn = boto.connect_s3()
    bucket = s3_conn.get_bucket(bucket_name)
    key = boto.s3.key.Key(bucket)
    key.key = '{0}/{1}.tar.gz'.format(name, version)
    key.set_contents_from_filename('gearbox_dist/{0}.tar.gz'.format(version))
    print "Uploaded gearbox update"
    if upload_release_file:
        key = boto.s3.key.Key(bucket)
        key.key = '{0}/LATEST'.format(name)
        key.set_contents_from_string(version)
Example #14
def s3_upload_dir(bucket, path, prefix="", connection_data=None):
    if isinstance(bucket, basestring):
        with contextlib.closing(boto.connect_s3(**connection_data)) as conn:
            bucket = conn.lookup(bucket)
    for root, dirs, files in os.walk(path):
        for fil in files:
            with contextlib.closing(boto.s3.key.Key(bucket)) as key:
                source = root + os.sep + fil
                target = re.sub("^" + re.escape(path) + "?/", prefix, source)
                if os.sep != '/':
                    target = re.sub(re.escape(os.sep), '/', target)
                key.key = target
                LOG.info("Uploading %s to %s/%s", source, bucket.name, target)
                key.set_contents_from_filename(source)
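
A hypothetical call, passing explicit credentials since the bucket is given as a string (values invented for illustration):

s3_upload_dir('my-example-bucket', '/srv/build', prefix='static/',
              connection_data={'aws_access_key_id': 'AKIA...',
                               'aws_secret_access_key': '...'})
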
Example #15
File: s3.py Project: AminaMseddi/tempest
def s3_upload_dir(bucket, path, prefix="", connection_data=None):
    if isinstance(bucket, basestring):
        with contextlib.closing(boto.connect_s3(**connection_data)) as conn:
            bucket = conn.lookup(bucket)
    for root, dirs, files in os.walk(path):
        for fil in files:
            with contextlib.closing(boto.s3.key.Key(bucket)) as key:
                source = root + os.sep + fil
                target = re.sub("^" + re.escape(path) + "?/", prefix, source)
                if os.sep != '/':
                    target = re.sub(re.escape(os.sep), '/', target)
                key.key = target
                LOG.info("Uploading %s to %s/%s", source, bucket.name, target)
                key.set_contents_from_filename(source)
Example #16
File: pipdeps.py Project: mjs/juju
def command_update(s3, requirements, verbose=False):
    bucket = s3.lookup(BUCKET)
    if bucket is None:
        if verbose:
            print("Creating bucket {}".format(BUCKET))
        bucket = s3.create_bucket(BUCKET, policy="public-read")
    with utility.temp_dir() as archives_dir:
        run_pip_install(
            ["--download", archives_dir], requirements, verbose=verbose)
        for archive in os.listdir(archives_dir):
            filename = os.path.join(archives_dir, archive)
            key = boto.s3.key.Key(bucket)
            key.key = PREFIX + archive
            key.set_contents_from_filename(filename, policy="public-read")
Example #17
def command_update(s3, requirements, verbose=False):
    bucket = s3.lookup(BUCKET)
    if bucket is None:
        if verbose:
            print("Creating bucket {}".format(BUCKET))
        bucket = s3.create_bucket(BUCKET, policy="public-read")
    with utility.temp_dir() as archives_dir:
        run_pip_install(["--download", archives_dir],
                        requirements,
                        verbose=verbose)
        for archive in os.listdir(archives_dir):
            filename = os.path.join(archives_dir, archive)
            key = boto.s3.key.Key(bucket)
            key.key = PREFIX + archive
            key.set_contents_from_filename(filename, policy="public-read")
Example #18
  def upload_file(self, source, bucket_name, key_name):
    """ Uploads a file from the local filesystem to Amazon S3.

    Args:
      source: A str containing the name of the file on the local filesystem that
        should be uploaded to Amazon S3.
      bucket_name: A str containing the name of the bucket that the file should
        be placed in.
      key_name: A str containing the name of the key that the file should be
        placed in.
    """
    bucket = self.connection.lookup(bucket_name)
    key = boto.s3.key.Key(bucket)
    key.key = key_name
    key.set_contents_from_filename(source)
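
Hypothetical usage, assuming storage is an instance of the surrounding class with an open self.connection (all names invented for illustration):

storage.upload_file('/tmp/app.tar.gz', 'my-deploy-bucket', 'releases/app-1.0.tar.gz')
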
Example #19
File: media.py Project: kissarat/pin
def _upload_file_to_bucket(server, filename):
    '''
    Upload the file to the bucket and return the URL to serve that file.
    Using the server, upload filename to a bucket. The bucket name
    is in server.path. After that, use server.url to generate
    the URL that will be used to serve the image from now on,
    and return that URL.
    '''
    _, filename_part = os.path.split(filename)
    pathname = _generate_path_name_for(filename_part)
    bucket_name = server.path
    #connection = boto.s3.connection.S3Connection()
    connection = boto.connect_s3(
        aws_access_key_id=_get_aws_access_key_id(),
        aws_secret_access_key=_get_aws_secret_access_key())
    bucket = connection.get_bucket(bucket_name)
    key = boto.s3.key.Key(bucket)
    key.key = pathname
    key.set_contents_from_filename(filename)
    key.set_acl('public-read')
    connection.close()
    return '{}/{}'.format(server.url, pathname)
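
_generate_path_name_for is defined elsewhere in the original module; a plausible sketch, assuming it only needs to produce a unique key name that keeps the file extension (hypothetical, not from the source):

import os
import uuid

def _generate_path_name_for(filename):
    # Hypothetical: unique key name preserving the original extension.
    _, ext = os.path.splitext(filename)
    return '{}{}'.format(uuid.uuid4().hex, ext)
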
Example #20
    def upload_logfile(self, log_archive_dir, logfile):
        """
        Takes the path to the log archive directory and a LogFile
        corresponding to a file therein. Uploads the file, returning the
        file's URI.
        """

        logfile_uri = get_logfile_uri(self.upload_uri, logfile)
        u = boto.storage_uri(logfile_uri)
        bucket = self._get_bucket(u.bucket_name)
        key = bucket.get_key(u.object_name)
        if key is not None:
            logging.warning(
                'S3Uploader.upload_logfile: %s already uploaded',
                logfile_uri)
            return
        key = bucket.new_key(u.object_name)
        try:
            key.set_contents_from_filename(
                os.path.join(log_archive_dir, logfile.filename)
            )
        except boto.exception.BotoServerError as e:
            return e
Example #21
  def upload(self, mpi, source, target, pos = 0, chunk = 0, part = 0):
    '''Thread worker for upload operation.'''
    s3url = S3URL(target)
    bucket = self.s3.lookup(s3url.bucket, validate=self.opt.validate)

    # Initialization: Set up multithreaded uploads.
    if not mpi:
      fsize = os.path.getsize(source)
      key = bucket.get_key(s3url.path)

      # optional checks
      if self.opt.dry_run:
        message('%s => %s', source, target)
        return
      elif self.opt.sync_check and self.sync_check(source, key):
        message('%s => %s (synced)', source, target)
        return
      elif not self.opt.force and key:
        raise Failure('File already exists: %s' % target)

      # extra headers
      extra_headers = {}
      if self.opt.add_header:
        for hdr in self.opt.add_header:
          try:
            key, val = hdr.split(":", 1)
          except ValueError:
            raise Failure("Invalid header format: %s" % hdr)
          key_inval = re.sub("[a-zA-Z0-9-.]", "", key)
          if key_inval:
            key_inval = key_inval.replace(" ", "<space>")
            key_inval = key_inval.replace("\t", "<tab>")
            raise ParameterError("Invalid character(s) in header name '%s': \"%s\"" % (key, key_inval))
          extra_headers[key.strip().lower()] = val.strip()

      # Small file optimization.
      if fsize < self.opt.max_singlepart_upload_size:
        key = boto.s3.key.Key(bucket)
        key.key = s3url.path
        key.set_metadata('privilege',  self.get_file_privilege(source))
        key.set_contents_from_filename(source, reduced_redundancy=self.opt.reduced_redundancy, headers=extra_headers)
        if self.opt.acl_public:
          key.set_acl('public-read')
        message('%s => %s', source, target)
        return

      # Here we need to have our own md5 value because multipart upload calculates
      # different md5 values.
      mpu = bucket.initiate_multipart_upload(s3url.path, metadata = {'md5': self.file_hash(source), 'privilege': self.get_file_privilege(source)})

      for args in self.get_file_splits(mpu.id, source, target, fsize, self.opt.multipart_split_size):
        self.pool.upload(*args)
      return

    # Handle each part in parallel, post initialization.
    mpu = None
    for mp in bucket.list_multipart_uploads():
      if mp.id == mpi.id:
        mpu = mp
        break
    if mpu is None:
      raise Failure('Could not find MultiPartUpload %s' % mpi.id)

    data = None
    with open(source, 'rb') as f:
      f.seek(pos)
      data = f.read(chunk)
    if not data:
      raise Failure('Unable to read data from source: %s' % source)

    mpu.upload_part_from_file(StringIO(data), part)

    # Finalize
    if mpi.complete():
      try:
        mpu.complete_upload()
        message('%s => %s', source, target)
      except Exception as e:
        mpu.cancel_upload()
        raise RetryFailure('Upload failed: Unable to complete upload %s.' % source)
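
For reference, the multipart path the worker above drives can be sketched on its own; a minimal version, assuming a bucket object plus key_name, local_path and chunk_size values (all names hypothetical):

from io import BytesIO

mpu = bucket.initiate_multipart_upload(key_name)
part_number = 1
with open(local_path, 'rb') as f:
    while True:
        data = f.read(chunk_size)
        if not data:
            break
        mpu.upload_part_from_file(BytesIO(data), part_number)
        part_number += 1
mpu.complete_upload()
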
Example #22
def test():
    print '--- running AWS s3 examples ---'
    c = boto.s3.connection.S3Connection(conf.AWS_ACCESS_KEY, conf.AWS_SECRET_ACCESS_KEY)

    print 'original bucket number:', len(c.get_all_buckets())
    
    bucket_name = 'yet.another.s3.example.code'
    print 'creating a bucket:', bucket_name
    try:
        bucket = c.create_bucket(bucket_name)
    except boto.exception.S3CreateError  as e:
        print ' ' * 4, 'error occurred:'
        print ' ' * 8, 'http status code:', e.status
        print ' ' * 8, 'reason:', e.reason
        print ' ' * 8, 'body:', e.body
        return

    test_bucket_name = 'no.existence.yet.another.s3.example.code'
    print 'if you just want to know whether the bucket(\'%s\') exists or not' % (test_bucket_name,), \
        'and don\'t want to get this bucket'
    try:
        test_bucket = c.head_bucket(test_bucket_name)
    except boto.exception.S3ResponseError as e:
        if e.status == 403 and e.reason == 'Forbidden':
            print ' ' * 4, 'the bucket(\'%s\') exists but you don\'t have the permission.' % (test_bucket_name,)
        elif e.status == 404 and e.reason == 'Not Found':
            print ' ' * 4, 'the bucket(\'%s\') doesn\'t exist.' % (test_bucket_name,)

    print 'or use lookup() instead of head_bucket() to do the same thing.', \
        'it will return None if the bucket does not exist instead of throwing an exception.'
    test_bucket = c.lookup(test_bucket_name)
    if test_bucket is None:
        print ' ' * 4, 'the bucket(\'%s\') doesn\'t exist.' % (test_bucket_name,)

    print 'now you can get the bucket(\'%s\')' % (bucket_name,)
    bucket = c.get_bucket(bucket_name)

    print 'add some objects to bucket ', bucket_name
    keys = ['sample.txt', 'notes/2006/January/sample.txt', 'notes/2006/February/sample2.txt',\
           'notes/2006/February/sample3.txt', 'notes/2006/February/sample4.txt', 'notes/2006/sample5.txt']
    print ' ' * 4, 'these key names are:'
    for name in keys:
        print ' ' * 8, name
    
    filename = './_test_dir/sample.txt'
    print ' ' * 4, 'you can set contents of object(\'%s\') from filename(\'%s\')' % (keys[0], filename,)
    key = boto.s3.key.Key(bucket, keys[0])
    bytes_written = key.set_contents_from_filename(filename)
    assert bytes_written == os.path.getsize(filename), '    error occurred: broken file'

    print ' ' * 4, 'or set contents of object(\'%s\') from an opened file object' % (keys[1],)
    fp = open(filename, 'r')
    key = boto.s3.key.Key(bucket, keys[1])
    bytes_written = key.set_contents_from_file(fp)
    assert bytes_written == os.path.getsize(filename), '    error occurred: broken file'

    print ' ' * 4, 'you can also set contents of the remaining key objects from a string'
    for name in keys[2:]:
        print ' ' * 8, 'key:', name
        key = boto.s3.key.Key(bucket, name)
        s = 'This is the content of %s ' % (name,)
        key.set_contents_from_string(s)
        print ' ' * 8, '..contents:', key.get_contents_as_string()
        # use get_contents_to_filename() to save contents to a specific file in the filesystem.

    #print 'You have %d objects in bucket %s' % ()    
    
    print 'list all objects added into \'%s\' bucket' % (bucket_name,)
    print ' ' * 4, 'list() automatically handles all of the result paging from S3.'
    print ' ' * 4, 'You just need to keep iterating until there are no more results.'
    print ' ' * 4, '---------------------------------------------------------------'
    bucket_size = 0
    for key in bucket.list():
        print ' ' * 4, key.name
        bucket_size += key.size
    print ' ' * 4, 'bucket size:', bucket_size, 'bytes.'
    # do not calculate bucket size or number of objects when you have millions of objects in a bucket.

    p = 'notes/2006/'
    print 'list objects start with \'%s\'' % (p,)
    objs = bucket.list(prefix = p)
    for key in objs:
        print ' ' * 4, key.name

    print 'list objects or key prefixes like \'%s/*\', something like what\'s at the top of the \'%s\' folder?' % (p, p,)
    objs = bucket.list(prefix = p, delimiter = '/')
    for key in objs:
        print ' ' * 4, key.name

    keys_per_page = 4
    print 'manually handle the results paging from s3,', ' number of keys per page:', keys_per_page
    print ' ' * 4, 'get page 1'
    objs = bucket.get_all_keys(max_keys = keys_per_page)
    for key in objs:
        print ' ' * 8, key.name

    print ' ' * 4, 'get page 2'
    last_key_name = objs[-1].name   # the last key of this page is the marker used to retrieve the next page.
    objs = bucket.get_all_keys(max_keys = keys_per_page, marker = last_key_name)
    for key in objs:
        print ' ' * 8, key.name
    """
    get_all_keys() a lower-level method for listing contents of a bucket.
    This closely models the actual S3 API and requires you to manually handle the paging of results. 
    For a higher-level method that handles the details of paging for you, you can use the list() method.
    """

    print 'you must delete all objects in the bucket \'%s\' before deleting this bucket' % (bucket_name, )
    print ' ' * 4, 'you can delete objects one by one'
    bucket.delete_key(keys[0])
    print ' ' * 4, 'or you can delete multiple objects using a single HTTP request with delete_keys().'
    bucket.delete_keys(keys[1:])

    #TODO print 'after previous deletion, we now have %d objects in bucket(\'%s\')' % (len(bucket.list()), bucket_name,)
    print 'now you can delete the bucket \'%s\'' % (bucket_name,)
    c.delete_bucket(bucket)
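
The two pages above could also be walked in a loop until the listing is exhausted; a minimal sketch, assuming the same bucket and keys_per_page:

marker = ''
while True:
    page = bucket.get_all_keys(max_keys=keys_per_page, marker=marker)
    for key in page:
        print ' ' * 8, key.name
    if not page.is_truncated:
        break
    marker = page[-1].name  # the last key of this page is the marker for the next
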
Example #23
import argparse
import boto
import boto.s3.key
import os


def progress_cb(curr, total):
    print('%d / %d B (%.2f%%)' % (curr, total, curr * 100.0 / float(total)))


if __name__ == '__main__':
    parser = argparse.ArgumentParser('upload a file to S3')
    parser.add_argument('file', help='path to the local file to upload')
    parser.add_argument('bucket', help='name of the S3 bucket to upload into')

    args = parser.parse_args()

    bucket = boto.connect_s3().get_bucket(args.bucket)
    key = boto.s3.key.Key(bucket=bucket, name=os.path.basename(args.file))
    key.set_contents_from_filename(args.file, cb=progress_cb, num_cb=20)
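
Assuming the script above is saved as upload.py (a hypothetical name), it could be invoked as:

# python upload.py ./backup.tar.gz my-example-bucket
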
Example #24
  def upload(self, mpi, source, target, pos = 0, chunk = 0, part = 0):
    '''Thread worker for upload operation.'''
    s3url = S3URL(target)
    bucket = self.s3.lookup(s3url.bucket, validate=self.opt.validate)

    # Initialization: Set up multithreaded uploads.
    if not mpi:
      fsize = os.path.getsize(source)
      key = bucket.get_key(s3url.path)

      # optional checks
      if self.opt.dry_run:
        message('%s => %s', source, target)
        return
      elif self.opt.sync_check and self.sync_check(source, key):
        message('%s => %s (synced)', source, target)
        return
      elif not self.opt.force and key:
        raise Failure('File already exists: %s' % target)

      # Small file optimization.
      if fsize < self.opt.max_singlepart_upload_size:
        key = boto.s3.key.Key(bucket)
        key.key = s3url.path
        key.set_metadata('privilege',  self.get_file_privilege(source))
        key.set_contents_from_filename(source)
        message('%s => %s', source, target)
        return

      # Here we need to have our own md5 value because multipart upload calculates
      # different md5 values.
      mpu = bucket.initiate_multipart_upload(s3url.path, metadata = {'md5': self.file_hash(source), 'privilege': self.get_file_privilege(source)})

      for args in self.get_file_splits(mpu.id, source, target, fsize, self.opt.multipart_split_size):
        self.pool.upload(*args)
      return

    # Handle each part in parallel, post initialization.
    mpu = None
    for mp in bucket.list_multipart_uploads():
      if mp.id == mpi.id:
        mpu = mp
        break
    if mpu is None:
      raise Failure('Could not find MultiPartUpload %s' % mpi.id)

    data = None
    with open(source, 'rb') as f:
      f.seek(pos)
      data = f.read(chunk)
    if not data:
      raise Failure('Unable to read data from source: %s' % source)

    mpu.upload_part_from_file(StringIO(data), part)

    # Finalize
    if mpi.complete():
      try:
        mpu.complete_upload()
        message('%s => %s', source, target)
      except Exception as e:
        mpu.cancel_upload()
        raise RetryFailure('Upload failed: Unable to complete upload %s.' % source)
Example #25
File: views.py Project: jaio46/BitVid
def upload(request):
    if request.method == "GET":

        return render(request, "upload.html")
    else:

        user = request.user
        file = request.FILES.get("file", None)

        if file is None:
            return HttpResponseBadRequest("You did not pass a file.")

        title = request.POST.get("title", "")
        desc = request.POST.get("desc", "")
        channel_id_str = request.POST.get("channel", "1")

        try:
            channel_id = int(channel_id_str)
        except ValueError:
            return HttpResponseBadRequest("Channel ID must be an integer")

        if title == "":
            return HttpResponseBadRequest("Title must not be empty.")

        if not re.match(r'^.{0,2000}$', title):
            return HttpResponseBadRequest("Description must not be longer than 2000 characters.")

        if not Channel.objects.filter(pk=channel_id).exists():
            return HttpResponseBadRequest("Channel ID must be a valid channel.")

        channel = Channel.objects.get(pk=channel_id)

        if not channel.members.filter(id=user.id).exists():
            return HttpResponseBadRequest("You do not own that channel.")


        video = Video.objects.create(uploader=user)
        video.title = title
        video.desciption = desc
        video.channel = channel
        video.save()

        video_file = VideoFile.objects.create()
        video_file.save()
        video_file.format = file.content_type

        video.video_files.add(video_file)

        video_file.save()
        video.save()

        conn = boto.s3.connection.S3Connection(bitvid.dbinfo.AWS_ACCESS, bitvid.dbinfo.AWS_SECRET)

        bucket = conn.get_bucket(VIDEO_BUCKET_NAME)
        bucket.set_acl("public-read")
        key = boto.s3.key.Key(bucket)

        video_path = str(video.id) + "/" + "original.mp4"

        video_file.url = "http://"+VIDEO_BUCKET_NAME +".s3.amazonaws.com/"+video_path #"http://d6iy9bzn1qbz8.cloudfront.net/" + video_path

        key.key = video_path
        key.set_contents_from_filename(file.temporary_file_path())

        key.set_acl('public-read')

        conn.close()

        video_file.save()
        return HttpResponse(str(video.id))
Example #26
    def worker(base_path):
        mtime = path = 0
        while 1:
            try:
                mtime, queued_path = queue.get()

                path = queued_path
                if path is None:
                    return

                key = boto.s3.key.Key(bucket)

                if mtime is None:  # delete
                    try:
                        try:
                            key.key = bucket_prefix + path
                            key.delete()
                        except Exception:
                            logger.exception('deleting %r, retrying' % key.key)
                            time.sleep(9)
                            key.key = bucket_prefix + path
                            key.delete()
                    except Exception:
                        if index is not None:
                            # Failed to delete. Put the key back so we
                            # try again later
                            index[queued_path] = 1
                        raise

                elif mtime is GENERATE:
                    (path, s3mtime) = path
                    fspath = join(base_path, path.encode(encoding))
                    if exists(fspath):
                        # Someone created a file since we decided to
                        # generate one.
                        continue

                    fspath = dirname(fspath)
                    data = "Index of " + path[:-len(INDEX_HTML) - 1]
                    data = [
                        "<!-- generated -->",
                        "<html><head><title>%s</title></head><body>" % data,
                        "<h1>%s</h1><table>" % data,
                        "<tr><th>Name</th><th>Last modified</th><th>Size</th>"
                        "</tr>",
                    ]
                    for name in sorted(os.listdir(fspath)):
                        if name.startswith('.'):
                            continue  # don't index dot files
                        name_path = join(fspath, name)
                        if isdir(name_path):
                            name = name + '/'
                            size = '-'
                        else:
                            size = os.stat(name_path).st_size
                        mtime = time.ctime(os.stat(name_path).st_mtime)
                        name = name.decode(encoding)
                        data.append('<tr><td><a href="%s">%s</a></td>\n'
                                    '    <td>%s</td><td>%s</td></tr>' %
                                    (name, name, mtime, size))
                    data.append("</table></body></html>\n")
                    data = '\n'.join(data)

                    digest = hashlib.md5(data.encode(encoding)).hexdigest()
                    if digest != s3mtime:
                        # Note that s3mtime is either a previous
                        # digest or it's 0 (because the path wasn't in s3) or
                        # it's an s3 upload time.  The test above
                        # works in all of these cases.
                        key.key = bucket_prefix + path
                        key.set_metadata('generated', 'true')
                        try:
                            key.set_contents_from_string(
                                data,
                                headers={'Content-Type': 'text/html'},
                            )
                        except Exception:
                            logger.exception(
                                'uploading generated %r, retrying' % path)
                            time.sleep(9)
                            key.set_contents_from_string(
                                data,
                                headers={'Content-Type': 'text/html'},
                            )

                        if s3mtime:
                            # update (if it was add, mtime would be 0)
                            if cloudfront:
                                invalidations.append(path)

                    if index is not None:
                        index[path] = digest

                else:  # upload
                    try:
                        if had_index:
                            # We only store mtimes to the nearest second.
                            # We don't have a fudge factor, so there's a
                            # chance that someone might update the file in
                            # the same second, so we check if a second has
                            # passed and sleep if it hasn't.
                            now = time_time_from_sixtuple(
                                time.gmtime(time.time()))
                            if not now > mtime:
                                time.sleep(1)

                        key.key = bucket_prefix + path
                        path = join(base_path, path)
                        try:
                            key.set_contents_from_filename(
                                path.encode(encoding))
                        except Exception:
                            logger.exception('uploading %r %r, retrying' %
                                             (mtime, path))
                            time.sleep(9)
                            key.set_contents_from_filename(
                                path.encode(encoding))

                    except Exception:
                        if index is not None:
                            # Upload failed. Remove from index so we
                            # try again later (if the path is still
                            # around).
                            index.pop(queued_path)
                        raise

            except Exception:
                logger.exception('processing %r %r' % (mtime, path))
            finally:
                queue.task_done()
Example #27
File: s4cmd.py Project: kjallbring/s4cmd
  def upload(self, mpi, source, target, pos = 0, chunk = 0, part = 0):
    '''Thread worker for upload operation.'''
    s3url = S3URL(target)
    bucket = self.s3.lookup(s3url.bucket, validate=self.opt.validate)

    # Initialization: Set up multithreaded uploads.
    if not mpi:
      fsize = os.path.getsize(source)
      key = bucket.get_key(s3url.path)

      # optional checks
      if self.opt.dry_run:
        message('%s => %s', source, target)
        return
      elif self.opt.sync_check and self.sync_check(source, key):
        message('%s => %s (synced)', source, target)
        return
      elif not self.opt.force and key:
        raise Failure('File already exists: %s' % target)

      # Small file optimization.
      if fsize < self.opt.max_singlepart_upload_size:
        key = boto.s3.key.Key(bucket)
        key.key = s3url.path
        key.set_metadata('privilege',  self.get_file_privilege(source))
        key.set_contents_from_filename(source)
        message('%s => %s', source, target)
        return

      # Here we need to have our own md5 value because multipart upload calculates
      # different md5 values.
      mpu = bucket.initiate_multipart_upload(s3url.path, metadata = {'md5': self.file_hash(source), 'privilege': self.get_file_privilege(source)})

      for args in self.get_file_splits(mpu.id, source, target, fsize, self.opt.multipart_split_size):
        self.pool.upload(*args)
      return

    # Handle each part in parallel, post initialization.
    mpu = None
    for mp in bucket.list_multipart_uploads():
      if mp.id == mpi.id:
        mpu = mp
        break
    if mpu is None:
      raise Failure('Could not find MultiPartUpload %s' % mpi.id)

    data = None
    with open(source, 'rb') as f:
      f.seek(pos)
      data = f.read(chunk)
    if not data:
      raise Failure('Unable to read data from source: %s' % source)

    mpu.upload_part_from_file(StringIO(data), part)

    # Finalize
    if mpi.complete():
      try:
        mpu.complete_upload()
        message('%s => %s', source, target)
      except Exception as e:
        mpu.cancel_upload()
        raise RetryFailure('Upload failed: Unable to complete upload %s.' % source)
Example #28
def upload(request):
    if request.method == "GET":

        return render(request, "upload.html")
    else:

        user = request.user
        file = request.FILES.get("file", None)

        if file is None:
            return HttpResponseBadRequest("You did not pass a file.")

        title = request.POST.get("title", "")
        desc = request.POST.get("desc", "")
        channel_id_str = request.POST.get("channel", "1")

        try:
            channel_id = int(channel_id_str)
        except ValueError:
            return HttpResponseBadRequest("Channel ID must be an integer")

        if title == "":
            return HttpResponseBadRequest("Title must not be empty.")

        if not re.match(r'^.{0,2000}$', title):
            return HttpResponseBadRequest(
                "Description must not be longer than 2000 characters.")

        if not Channel.objects.filter(pk=channel_id).exists():
            return HttpResponseBadRequest(
                "Channel ID must be a valid channel.")

        channel = Channel.objects.get(pk=channel_id)

        if not channel.members.filter(id=user.id).exists():
            return HttpResponseBadRequest("You do not own that channel.")

        video = Video.objects.create(uploader=user)
        video.title = title
        video.desciption = desc
        video.channel = channel
        video.save()

        video_file = VideoFile.objects.create()
        video_file.save()
        video_file.format = file.content_type

        video.video_files.add(video_file)

        video_file.save()
        video.save()

        conn = boto.s3.connection.S3Connection(bitvid.dbinfo.AWS_ACCESS,
                                               bitvid.dbinfo.AWS_SECRET)

        bucket = conn.get_bucket(VIDEO_BUCKET_NAME)
        bucket.set_acl("public-read")
        key = boto.s3.key.Key(bucket)

        video_path = str(video.id) + "/" + "original.mp4"

        video_file.url = "http://" + VIDEO_BUCKET_NAME + ".s3.amazonaws.com/" + video_path  #"http://d6iy9bzn1qbz8.cloudfront.net/" + video_path

        key.key = video_path
        key.set_contents_from_filename(file.temporary_file_path())

        key.set_acl('public-read')

        conn.close()

        video_file.save()
        return HttpResponse(str(video.id))