Example #1
def S3Upload(upload_name, fileObj, bucket_name=None):
    print 'check upload args'
    if not bucket_name:
        raise ValueError('No Bucket Name')

    print 'conn'
    conn = S3.AWSAuthConnection(config.AWS_ACCESS_KEY_ID,
                                config.AWS_SECRET_ACCESS_KEY)

    content_type = mimetypes.guess_type(upload_name)[0]
    if not content_type:
        content_type = 'text/plain'
    print 'conn put'
    st = conn.put(bucket_name, upload_name, S3.S3Object(fileObj), {
        'x-amz-acl': 'public-read',
        'Content-Type': content_type
    })
    print 'end conn put'
    resp = st.http_response
    print 'resp', resp, resp.status
    if 200 != resp.status:
        print 'upload failed'
        print resp.msg
        return False
    print 'upload succeeded'
    return True
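A minimal usage sketch for S3Upload (the file name and bucket are placeholders, not from the original project); like the other examples on this page, the raw file contents end up wrapped in S3.S3Object:

# Hypothetical caller: upload ./report.pdf under the same key to "my-bucket".
with open('report.pdf', 'rb') as f:
    uploaded = S3Upload('report.pdf', f.read(), bucket_name='my-bucket')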
Example #2
def upload_s3(fname, mimetype, uname=''):
    if not uname:
        uname = os.path.basename(fname)

    filedata = open(fname, 'rb').read()

    conn = S3.AWSAuthConnection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    conn.put(settings.BUCKET_NAME, uname, S3.S3Object(filedata),
             {'x-amz-acl': 'public-read', 'Content-Type': mimetype})
Example #3
def update_s3():
    conn = S3.AWSAuthConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
    for line in sys.stdin:
        filename = os.path.normpath(line[:-1])
        if filename == '.' or not os.path.isfile(filename):
            continue  # Skip this, because it's not a file.
        print "Uploading %s" % filename
        filedata = open(filename, 'rb').read()
        content_type = mimetypes.guess_type(filename)[0]
        if not content_type:
            content_type = 'text/plain'
        conn.put(BUCKET_NAME, filename, S3.S3Object(filedata), {
            'x-amz-acl': 'public-read',
            'Content-Type': content_type
        })
Example #4
 def save_gtfs_file(self, contents, user, gtfs_crawler, comments, filename):
     assert '@' in user
     # if setting is prod
     if tornado.options.options.shunt_s3:
         if filename.startswith("queue/"):
             filename = filename[len("queue/"):]
         if not os.path.exists("/tmp/gtfs_s3/queue"):
             os.makedirs("/tmp/gtfs_s3/queue")
         filename = os.path.join("/tmp/gtfs_s3/queue", filename)
         logging.info('writing %s' % filename)
         f = open(filename, 'wb')
         f.write(contents)
         f.close()
         f = open(filename + '.meta', 'wb')
         f.write(
             json.dumps(
                 dict(user=user,
                      gtfs_crawler=gtfs_crawler,
                      comments=comments)))
         f.close()
     else:
         obj = S3.S3Object(contents)
         obj.metadata['user'] = user
         obj.metadata['gtfs_crawler'] = gtfs_crawler
         obj.metadata['comments'] = comments
         logging.info('putting %r' % filename)
         self.conn.put("gtfs", filename, obj)
Example #5
    def handleMessage(self, message):
        
        body = message.get_body()
        
        try:
            # Parse the request
            request = Request(body)
        except XMLError:
            # Just return if the request can't be parsed
            # This throws away the request
            return
        
        # Process the request
        # If an exception is thrown (other than our RequestErrors),
        # the message will remain in the queue to be processed again later.
        # Corrupt data is removed before the exception is raised, so the
        # request will (hopefully) succeed next time
        # TODO Add TTL to requests, process max 3 times, or something
        reply = request.process()
        
        # Write the reply to S3
        url = S3.uploadFile(request.id + '.xml', reply)
        
        # Put the url to the reply in the outqueue
        m = Message()
        m.set_body(url)
        outQueue.write(m)
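The comments above describe at-least-once handling: a message should only be removed from the input queue once handleMessage returns without raising. A sketch of the surrounding polling loop, assuming boto-style SQS queues named inQueue and outQueue (the queue names and the read/delete calls are assumptions, not part of the original snippet):

    def poll(self):
        # Hypothetical driver: read, process, and delete only on success, so a
        # message that triggers an exception stays queued and is retried later.
        while True:
            message = inQueue.read(visibility_timeout=60)
            if message is None:
                continue
            self.handleMessage(message)       # raising leaves it in the queue
            inQueue.delete_message(message)   # success: remove it for good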
Example #6
def my_handler(event, context):
    IS_LOCAL = False
#while(True):
    srcs = slice_files()
    for asset_class in srcs:
        url = srcs[asset_class]

        starting_date = date.today()

        for d in dates(starting_date):
            if (starting_date-d).days > 3:
                break #Covers the case where we haven't run this for 10 days.

            s3 = S3.SDRWriter()

            filename = "{}_{}.csv".format(asset_class, d.strftime("%Y_%m_%d"))

            if "CUMULATIVE" in url and s3.exists(filename, local=IS_LOCAL):
                print("Skipping {} because it already exists".format(filename))
                continue

            s3.setup(filename, local=IS_LOCAL)

            sdr = SDR(url)

            for header, line in sdr.retrieve(d):
                if header is not None and not s3.is_header_written():
                    #print("Header:{}".format(header))
                    s3.write_header(header)
                elif line is not None:
                    #print("Entry:{}".format(line))
                    s3.write_row(line)

            s3.teardown()
    return {"status": 200, "message": "Finished"}
Example #7
    def __init__(self,
                 config_path='./config.json',
                 thumbnail=False,
                 sample=None):
        self.config = None

        # standard lmdb environment for storing biblio entries by uuid
        self.env = None

        # lmdb environment for storing mapping between doi/pmcid and uuid
        self.env_doi = None

        # lmdb environment for keeping track of failures
        self.env_fail = None

        self._load_config(config_path)

        # boolean indicating if we want to generate thumbnails of front page of PDF
        self.thumbnail = thumbnail
        self._init_lmdb()

        # if a sample value is provided, indicate that we only harvest the indicated number of PDF
        self.sample = sample

        self.s3 = None
        if self.config["bucket_name"] is not None and len(
                self.config["bucket_name"]) is not 0:
            self.s3 = S3.S3(self.config)
Example #8
 def get_s3_url(self):
     import secrets
     import S3
     import defaults
     gen = S3.QueryStringAuthGenerator(secrets.AWS_ID, secrets.AWS_SECRET_KEY)
     url = gen.get(defaults.bucket, self.get_real_name())
     return url
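QueryStringAuthGenerator can also sign links that expire; a small sketch reusing the same secrets/defaults modules and the set_expires_in call seen in the bucket-listing example below (the helper itself is hypothetical):

def get_expiring_s3_url(key, seconds=300):
    # Hypothetical variant of get_s3_url: the signed URL stops validating
    # after `seconds`.
    import secrets
    import S3
    import defaults
    gen = S3.QueryStringAuthGenerator(secrets.AWS_ID, secrets.AWS_SECRET_KEY)
    gen.set_expires_in(seconds)
    return gen.get(defaults.bucket, key)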
Example #9
    def __init__(self, config_path='./config.json'):
        self.config = None
        self._load_config(config_path)

        self.s3 = None
        if self.config["bucket_name"] is not None and len(
                self.config["bucket_name"]) is not 0:
            self.s3 = S3.S3(self.config)
Example #10
def list_s3(request):
    """
    List Amazon S3 bucket contents

    """
    if S3 is not None:
        conn = S3.AWSAuthConnection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        generator = S3.QueryStringAuthGenerator(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, calling_format=S3.CallingFormat.VANITY)
        generator.set_expires_in(300)
        bucket_entries = conn.list_bucket(settings.AWS_BUCKET_NAME).entries
        entries = []
        for entry in bucket_entries:
            entry.s3url = generator.get(settings.AWS_BUCKET_NAME, entry.key)
            entries.append(entry)
        return direct_to_template(request, 'export/list_s3.html', {'object_list': entries, 's3support': True})
    else:
        return direct_to_template(request, 'export/list_s3.html', {'object_list': [], 's3support': False})
Example #11
 def connect_s3(self):
     if tornado.options.options.shunt_s3:
         logging.info('skipping s3 connection --shunt-s3')
         return
     aws_access_key_id = _utf8(tornado.options.options.aws_key)
     aws_secret_access_key = _utf8(tornado.options.options.aws_secret)
     self.conn = S3.AWSAuthConnection(aws_access_key_id,
                                      aws_secret_access_key)
Example #12
 def setUp(self):
     self.generator = S3.QueryStringAuthGenerator(AWS_ACCESS_KEY_ID,
                                                  AWS_SECRET_ACCESS_KEY)
     if self.generator.is_secure:
         self.connection = httplib.HTTPSConnection(
             self.generator.server_name)
     else:
         self.connection = httplib.HTTPConnection(
             self.generator.server_name)
Example #13
def upload(filename):
    conn = S3.AWSAuthConnection(config.S3_ACCESS_KEY, config.S3_SECRET)
    result = conn.check_bucket_exists(config.S3_BUCKET)
    if result.status != 200:
        result = conn.create_located_bucket(config.S3_BUCKET, S3.Location.DEFAULT)

    assert 200 == conn.put(config.S3_BUCKET, os.path.basename(filename), read_file(filename)).http_response.status

    print "File %s successfully backed up to S3 (with same filename)." % filename
Example #14
def main(version, input1):

    # parse arguments
    full_argments = parseargs(input1)
    argies = full_argments.copy()

    # initialize variables:
    (variables, varbsCount, varbs) = getVars(full_argments)

    # this is the random heuristic
    #variables = random_heuristic(variables)

    argments = tautology(
        full_argments)  # remove tautologies, just necessary once.

    # initialization of lists (args & assignments) and boolean (validity_check)
    validity_check = True
    assments = []
    backtrack = []
    units = []
    backtrack_counter = 0

    sys.setrecursionlimit(10**8)

    # start recursive function
    if version == 'S1':
        assments, backtrack_counter = s1.solve(argies, assments, variables,
                                               backtrack, backtrack_counter,
                                               argments, units)
    elif version == 'S2':
        while any(len(clause) == 1 for clause in argments) and validity_check:
            variables, assments = unit_propagation(variables, argments,
                                                   assments, units)
            argments, assments, validity_check = simplify(
                argments, assments, validity_check)
        units = []
        assments, backtrack_counter = s2.solve(argies, assments, variables,
                                               backtrack, backtrack_counter,
                                               argments, units)
    elif version == 'S3':
        while any(len(clause) == 1 for clause in argments) and validity_check:
            variables, assments = unit_propagation(variables, argments,
                                                   assments, units)
            argments, assments, validity_check = simplify(
                argments, assments, validity_check)
        #units = []
        assments, backtrack_counter = s3.solve(argies, assments, variables,
                                               backtrack, backtrack_counter,
                                               argments, units)

    if not validity_check:
        message = 'failure'
    else:
        message = 'Success! This formula is satisfiable, with the following assignments: '

    return assments, message, backtrack_counter
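A tiny self-contained illustration of the unit-propagation loop used above, on clauses represented as lists of integer literals (the real unit_propagation and simplify helpers are not shown, so this only mirrors the idea):

# Clauses: (x1) AND (-x1 OR x2) AND (x2 OR x3)
clauses = [[1], [-1, 2], [2, 3]]
assignments = []
while any(len(clause) == 1 for clause in clauses):
    lit = next(c[0] for c in clauses if len(c) == 1)    # pick a unit literal
    assignments.append(lit)
    # drop satisfied clauses, shorten clauses containing the negated literal
    clauses = [[l for l in c if l != -lit] for c in clauses if lit not in c]
# After propagation: assignments == [1, 2] and clauses == [] (satisfied)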
Example #15
def Process(msg):
    job_id = msg.attrs["job_id"]
    JobContOutput.P(
        "Got a job completion msg. job_id:%s Terminsting the cluster ..." %
        job_id)
    TermCluster(job_id)

    JobReq.DeleteMsg(msg.attrs["job_req_msg_recript_handle"])
    _DeleteMsg(msg)
    S3.Sync()
Example #16
def publish(filepath, s3bucket, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,
            version):
    filename = filepath.split("/")[-1]
    s3key = "/".join([p['release.type'], p['project.key'], filename])

    print "Reading in content from %s" % filepath
    filedata = open(filepath, "rb").read()

    filehash = _sha(filedata).hexdigest()

    print "Preparing to upload %s to %s/%s" % (filename, s3bucket, s3key)

    content_type = mimetypes.guess_type(filename)[0]
    if content_type is None:
        content_type = 'text/plain'

    print "File appears to be %s" % content_type

    print "Connecting to S3..."
    conn = S3.AWSAuthConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)

    print "Checking if bucket %s exists..." % s3bucket
    check = conn.check_bucket_exists(s3bucket)
    if (check.status == 200):
        print "Uploading %s to %s/%s" % (filename, s3bucket, s3key)
        print conn.put(
            s3bucket, s3key, S3.S3Object(filedata), {
                'Content-Type': content_type,
                'x-amz-acl': 'public-read',
                'x-amz-meta-project.name': 'Spring Python',
                'x-amz-meta-release.type': p['release.type'],
                'x-amz-meta-bundle.version': version,
                'x-amz-meta-package.file.name': filename
            }).message

        print "Uploading SHA1 digest to %s/%s" % (s3bucket, s3key + '.sha1')
        print conn.put(s3bucket, s3key + '.sha1',
                       S3.S3Object(filehash + ' ' + filename + "\n"), {
                           'Content-Type': content_type,
                           'x-amz-acl': 'public-read'
                       }).message
    else:
        print "Error code %s: Unable to publish" % check.status
Example #17
def retrieve(filename):
    conn = S3.AWSAuthConnection(config.S3_ACCESS_KEY, config.S3_SECRET)
    assert 200 == conn.check_bucket_exists(config.S3_BUCKET).status

    result = conn.get(config.S3_BUCKET, filename)
    assert 200 == result.http_response.status

    f = open(filename, "w")
    f.write(result.object.data)
    f.close()

    print "File %s successfully retrieved (with same filename)." % filename
Example #18
def list_files():
    conn = S3.AWSAuthConnection(config.S3_ACCESS_KEY, config.S3_SECRET)
    result = conn.check_bucket_exists(config.S3_BUCKET)
    if result.status != 200:
        result = conn.create_located_bucket(config.S3_BUCKET,
                                            S3.Location.DEFAULT)

    result = conn.list_bucket(config.S3_BUCKET)
    assert 200 == result.http_response.status
    print "Size\t\tKey"
    for entry in result.entries:
        print "%s\t%s" % (entry.size, entry.key)
Example #19
    def create(self, content, mimetype, metadata):
        key = self._generate_valid_key()
        obj = S3.S3Object(content, metadata)
        self.conn.put(self.bucket, key, obj, {
            'x-amz-storage-class': 'REDUCED_REDUNDANCY',
            'Content-Type': mimetype,
        })

        aclxml = self.conn.get_acl(self.bucket, key).body
        acl = parseString(aclxml)
        acl.getElementsByTagName('AccessControlList')[0].appendChild(_pub_read_grant)
        self.conn.put_acl(self.bucket, key, acl.toxml())
        return self._published_url(key)
Example #20
def push_media_to_s3(subpath, content_type):
    """
    Upload a subpath of the media directory to S3.
    """
    if not settings.USE_S3:
        return
    import S3
    conn = S3.AWSAuthConnection(settings.S3_ACCESS_KEY, settings.S3_SECRET_KEY)
    localPath = os.path.join(settings.MEDIA_ROOT, subpath)
    obj = S3.S3Object(file(localPath).read())
    tries = 5
    while True:
        try:
            conn.put(settings.S3_BUCKET, settings.S3_PATH + subpath, obj, {
                'Content-Type': content_type,
                'x-amz-acl': 'public-read'
            })
        except:
            tries -= 1
            if not tries:
                raise
        else:
            return
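A usage sketch for the uploader above (the subpath and content type are placeholders); the put is retried up to five times before the last exception is re-raised.

# Hypothetical call: push one stylesheet from MEDIA_ROOT to the configured bucket.
push_media_to_s3('css/site.css', 'text/css')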
Example #21
def files(request, project_name):
    """Files for a project. Shows the files uploaded for a project.
    Actions available:
    Add files:  Owner Participant
    """
    project = get_project(request, project_name)
    gen = S3.QueryStringAuthGenerator(secrets.AWS_ID, secrets.AWS_SECRET_KEY)
    addfileform = bforms.AddFileForm(project=project, user=request.user)
    if request.method == 'POST':
        if request.POST.has_key('Addfile'):
            addfileform = bforms.AddFileForm(project, request.user,
                                             request.POST, request.FILES)
            if addfileform.is_valid():
                addfileform.save()
                return HttpResponseRedirect('.')
        if request.POST.has_key('fileid'):
            fileid = int(request.POST['fileid'])
            file = ProjectFile.objects.get(project=project, id=fileid)
            conn = S3.AWSAuthConnection(secrets.AWS_ID, secrets.AWS_SECRET_KEY)
            for revision in file.projectfileversion_set.all():
                conn.delete(defaults.bucket, revision.revision_name)
            file.delete()
    payload = locals()
    return render(request, 'project/files.html', payload)
Example #22
def export_to_s3(request):
    """
    Dump the database and upload the dump to Amazon S3

    """
    if request.method == 'POST':
        if settings.DATABASE_ENGINE == 'mysql':
            cmd = MYSQLDUMP_CMD % (settings.DATABASE_HOST, settings.DATABASE_USER, settings.DATABASE_PASSWORD, settings.DATABASE_NAME)
        elif settings.DATABASE_ENGINE == 'sqlite3':
            cmd = SQLITE3DUMP_CMD % settings.DATABASE_NAME
        else:
            raise ImproperlyConfigured, "Sorry, django-export only supports mysql and sqlite3 database backends."
        stdin, stdout = os.popen2(cmd)
        stdin.close()
        file_name = 'dump_%s.sql.bz2' % time.strftime('%Y%m%d-%H%M')
        conn = S3.AWSAuthConnection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        res = conn.put(settings.AWS_BUCKET_NAME, file_name, S3.S3Object(stdout.read()), {'Content-Type': 'application/x-bzip2',})
        if res.http_response.status == 200:
            request.user.message_set.create(message="%s" % _(u"%(filename)s saved on Amazon S3") % {'filename': file_name})
        else:
            request.user.message_set.create(message="%s" % _(u"Upload failed with %(status)s") % {'status': res.http_response.status})
        stdout.close()
        return HttpResponseRedirect('/admin/')
    return direct_to_template(request, 'export/export.html', {'what': _(u'Export Database to S3'), 's3support': (S3 is not None), 's3': True})
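MYSQLDUMP_CMD and SQLITE3DUMP_CMD are referenced but not defined in this snippet; given the four %-substitutions (host, user, password, name), the .sql.bz2 file name and the application/x-bzip2 content type, they presumably resemble the following (a guess, not the project's actual constants):

MYSQLDUMP_CMD = 'mysqldump --host=%s --user=%s --password=%s %s | bzip2 -c'
SQLITE3DUMP_CMD = 'sqlite3 %s .dump | bzip2 -c'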
Example #23
    def __init__(self, config_path='./config.json'):
        self.config = None

        # standard lmdb environment for storing processed biblio entry uuid
        self.env = None

        # lmdb environment for keeping track of PDF annotation failures
        self.env_fail = None

        self._load_config(config_path)
        self._init_lmdb()

        if self.config['bucket_name'] is not None and len(
                self.config['bucket_name']) > 0:
            self.s3 = S3.S3(self.config)
Example #24
 def __init__(self, access_key_id, secret_access_key, bucket,
              key_length=8, publish_domain=None, http=None,
              generate_key=generate_random_word):
     if publish_domain is None:
         publish_domain = '%s.%s' % (bucket, S3.DEFAULT_HOST)
     if http is None:
         http = httplib2.Http()
     self.conn = S3.AWSAuthConnection(access_key_id, secret_access_key)
     self.bucket = bucket
     self.key_length = key_length
     self.publish_domain = publish_domain
     self.http = http
     self.generate_key = generate_key
     if self.conn.check_bucket_exists(bucket).status == 404:
         self.conn.create_located_bucket(bucket, S3.Location.DEFAULT)
Example #25
def onSubscribe(message):

    command = Box(json.loads(str(message, 'utf-8')))
    startTime = datetime.datetime.strptime(command.startTime, "%Y/%m/%d %H:%M:%S")
    seconds = command.seconds
    endTime = startTime + timedelta(seconds=seconds) 
    fileName = "/tmp/output.mp4"

    mp4 = Mp4.Mp4(dataPath)
    mp4.create(startTime, endTime, fileName)
    print("{} created.".format(fileName))

    s3 = S3.S3(identityPoolId)
    key = "{}.mp4".format(startTime)
    s3.putObject(bucketName, key, fileName)
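The handler expects a JSON body carrying a start time and a clip length in seconds; a hypothetical payload in that shape (values invented for illustration):

import json

# Clip 60 seconds of video starting at the given timestamp.
onSubscribe(json.dumps({"startTime": "2021/06/01 10:30:00",
                        "seconds": 60}).encode('utf-8'))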
Example #26
File: run_S3.py Project: CU-BIC/S3
def main():
    # Set the API key for use in the S3 module...
    S3.API_KEY = args.api_key
    S3.IMG_DIR = args.output_dir
    S3.VERBOSE = args.verbose
    S3.IMAGE_WIDTH = args.width
    S3.IMAGE_HEIGHT = args.height
    S3.NUM_STEPS = args.walk_steps

    # Get Regional Bounds, and pass the exclusion cities to get their polygons
    search_region, exclude = S3.get_regional_polygon(args.coords,
                                                     args.exclusions)

    print search_region
    # Begin the sampling procedure!
    search_area(search_region, exclude, args.epsilon)
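The module-level args object is not shown; a sketch of an argument parser that would provide the attributes referenced above (option names, help text, and defaults are inferred guesses):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--api-key', dest='api_key', required=True)
parser.add_argument('--output-dir', dest='output_dir', default='./images')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--width', type=int, default=640)
parser.add_argument('--height', type=int, default=640)
parser.add_argument('--walk-steps', dest='walk_steps', type=int, default=100)
parser.add_argument('--coords', help='bounding coordinates of the search region')
parser.add_argument('--exclusions', nargs='*', default=[],
                    help='cities to exclude from the region')
parser.add_argument('--epsilon', type=float, default=0.1)
args = parser.parse_args()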
Example #27
def main(args):
    word_embs = word_embeddings.load_embeddings(args.embs_path)

    with open(args.output_jsonl, 'w') as out:
        with open(args.input_jsonl, 'r') as f:
            for line in f:
                instance = json.loads(line)
                # The input summaries to S3 are lists of sentences. The example
                # just passes the whole text in as one sentence without
                # sentence-tokenizing it first, so we do the same. But each
                # input summary here is a single string, so we wrap it in an
                # extra list.
                summary = [instance['summary']]
                references = [[reference]
                              for reference in instance['references']]
                pyr, resp = S3.S3(references, summary, word_embs,
                                  args.model_folder)
                out.write(json.dumps({'pyr': pyr, 'resp': resp}) + '\n')
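Each input line must carry one summary string and a list of reference strings; a hypothetical record in that shape:

import json

# One input_jsonl line as consumed above (contents invented for illustration).
example_line = json.dumps({
    'summary': 'a storm hit the coast overnight',
    'references': ['a coastal storm struck during the night',
                   'the coast was hit by an overnight storm'],
})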
Example #28
 def __init__(self, upload_to='', stored_file_implementation=StoredFile):
     # try to work around a bug in the S3 code which uses bad day names
     # http://code.google.com/p/boto/issues/detail?id=140
     # but the workaround doesn't work :(
     #import locale
     #    locale.setlocale(locale.LC_TIME, 'en_US.utf8')
     #    print 'create S3 storage'
     import settings
     import S3
     self.upload_to = upload_to
     conn = S3.AWSAuthConnection(settings.AWS_ACCESS_KEY_ID,
                                 settings.AWS_SECRET_ACCESS_KEY)
     #        _generator = S3.QueryStringAuthGenerator( settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY )
     if (conn.check_bucket_exists(settings.AWS_BUCKET_NAME).status == 200):
         pass
     else:
         conn.create_located_bucket(settings.AWS_BUCKET_NAME,
                                    settings.AWS_LOCATION).message
Example #29
    def __init__(self, config_path='./config.json'):
        self.config = None

        # standard lmdb environment for storing biblio entries by uuid
        self.env = None

        # lmdb environment for storing mapping between doi and uuid
        self.env_doi = None

        # lmdb environment for keeping track of failures
        self.env_fail = None

        self._load_config(config_path)
        self._init_lmdb()

        self.s3 = None
        if self.config["bucket_name"] is not None and len(
                self.config["bucket_name"]) is not 0:
            self.s3 = S3.S3(self.config)
Example #30
    def _make_request(self, method, bucket='', key='', query_args={}, headers={}, data='', metadata={}):

        server = ''
        if bucket == '':
            server = self.server
        elif self.calling_format == S3.CallingFormat.SUBDOMAIN:
            server = "%s.%s" % (bucket, self.server)
        elif self.calling_format == S3.CallingFormat.VANITY:
            server = bucket
        else:
            server = self.server

        path = ''

        if (bucket != '') and (self.calling_format == S3.CallingFormat.PATH):
            path += "/%s" % bucket

        # add the slash after the bucket regardless
        # the key will be appended if it is non-empty
        path += "/%s" % urllib.quote_plus(key)


        # build the path_argument string
        # add the ? in all cases since 
        # signature and credentials follow path args
        if len(query_args):
            path += "?" + query_args_hash_to_string(query_args)

        is_secure = self.is_secure
        host = "%s:%d" % (server, self.port)
        while True:
            if (is_secure):
                connection = httplib.HTTPSConnection(host)
            else:
                connection = httplib.HTTPConnection(host)

            final_headers = S3.merge_meta(headers, metadata)
            # add auth header
            self._add_aws_auth_header(final_headers, method, bucket, key, query_args)
            
            return connection, path, final_headers
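How the three calling formats above resolve for a sample bucket and key (an illustration of the branches, not output captured from the library):

# For bucket "mybucket", key "a b.txt", server "s3.amazonaws.com", port 443:
#   SUBDOMAIN -> host "mybucket.s3.amazonaws.com:443", path "/a+b.txt"
#   VANITY    -> host "mybucket:443",                  path "/a+b.txt"
#   PATH      -> host "s3.amazonaws.com:443",          path "/mybucket/a+b.txt"
# (urllib.quote_plus turns the space in the key into "+".)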
Example #31
def transcode(output, input):
    name, _ = S3.check_ext_name(input)

    AVI_output = output / '{}_avi.avi'.format(name)
    command = f"ffmpeg -i {input} -c:v copy -c:a copy {AVI_output}"
    os.system(command)
    #displayVideo(AVI_output)

    VP9_output = output / '{}_vp9.mp4'.format(name)
    command = f'ffmpeg -i {input} -c:v libvpx-vp9  {VP9_output}'
    os.system(command)
    #displayVideo(VP9_output)

    VP8_output = output / '{}_vp8.mkv'.format(name)
    command = f"ffmpeg -i {input} -c:v libvpx -qmin 0 -qmax 50 -crf 5 -b:v 1M -c:a libvorbis {VP8_output}"
    os.system(command)
    #displayVideo(VP8_output)

    H265_output = output / '{}_h265.mp4'.format(name)
    command = f'ffmpeg -i {input} -c:v libx265 -crf 26 -preset fast -c:a aac -b:a 128k {H265_output}'
    os.system(command)
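A usage sketch for transcode, assuming output is a pathlib.Path (the / operator above implies one), input is an existing video file, and ffmpeg is on the PATH:

from pathlib import Path

# Hypothetical call: write the four re-encoded variants of clip.mp4 into ./out.
transcode(Path('out'), 'clip.mp4')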
Example #32
 def save(self):
     conn = S3.AWSAuthConnection(secrets.AWS_ID, secrets.AWS_SECRET_KEY)
     uploaded_filename = self.cleaned_data['filename'].name
     filename = '/%s/%s' % (self.project, uploaded_filename)
     content = self.cleaned_data['filename'].read()
     try:
         old_file = self.project.projectfile_set.get(
             filename=uploaded_filename)
         versions = old_file.projectfileversion_set.all().count()
         split_f = filename.rsplit('.', 1)
         name_no_ext = ''.join(split_f[:-1])
         filename = '%s-%s.%s' % (name_no_ext, versions + 1, split_f[-1])
         response = conn.put(defaults.bucket, filename, content)
         saved_file = old_file
         saved_file_revision = ProjectFileVersion(file=saved_file,
                                                  revision_name=filename,
                                                  user=self.user,
                                                  size=len(content))
         saved_file_revision.save()
         saved_file.current_revision = saved_file_revision
         saved_file.total_size += saved_file_revision.size
         saved_file.save()
     except ProjectFile.DoesNotExist, e:
         split_f = filename.rsplit('.', 1)
         name_no_ext = ''.join(split_f[:-1])
         filename = '%s-%s.%s' % (name_no_ext, 1, split_f[-1])
         response = conn.put(defaults.bucket, filename, content)
         saved_file = ProjectFile(project=self.project,
                                  filename=uploaded_filename,
                                  total_size=0)
         saved_file.save()
         saved_file_revision = ProjectFileVersion(file=saved_file,
                                                  revision_name=filename,
                                                  user=self.user,
                                                  size=len(content))
         saved_file_revision.save()
         saved_file.current_revision = saved_file_revision
         saved_file.total_size = saved_file_revision.size
         saved_file.save()