Example #1: do_delete_object
 def do_delete_object(self, account_name, container_name, object_name):
     """Handle object-level DELETE operations."""
     res = self.conn.execute(sql.select([
         sql.objects.c.uuid
     ]).where(
         sql.objects.c.account == account_name
     ).where(
         sql.objects.c.container == container_name
     ).where(
         sql.objects.c.name == object_name
     ).where(
         sql.objects.c.deleted == False
     )).fetchone()
     if not res:
         self.send_error(httplib.NOT_FOUND)
         return
     (fn_uuid,) = res
     with self.conn.begin():
         self.conn.execute(sql.objects.update().where(
             sql.objects.c.uuid == fn_uuid
         ).values(
             deleted=True
         ))
     self.http.send_response(httplib.NO_CONTENT)
     self.http.end_headers()
Example #2: purge_deleted_objects
 def purge_deleted_objects(self):
     to_purge = []
     for (fn_uuid, meta) in self.conn.execute(
         sql.select([sql.objects.c.uuid, sql.objects.c.meta]).where(sql.objects.c.deleted == True)
     ):
         meta = json.loads(meta)
         to_purge.append((fn_uuid, meta))
     if len(to_purge) > 0:
         print("Starting threads...")
         for (fn_uuid, meta) in to_purge:
             for peer in meta["disk_peers"]:
                 if not peer in self.config["peers"]:
                     continue
                 self.sema.acquire(blocking=True)
                 t = threading.Thread(target=self.purge_thread, args=(fn_uuid, peer))
                 t.start()
         print("Waiting for threads to finish...")
         while threading.active_count() > 1:
             time.sleep(0.5)
         print("All threads done.")
         for (fn_uuid, meta) in to_purge:
             self.conn.execute(sql.objects.delete().where(sql.objects.c.uuid == fn_uuid))
             contentdir = os.path.join(self.config["staging_files_dir"], fn_uuid[0:2], fn_uuid[2:4])
             if os.path.isfile(os.path.join(contentdir, fn_uuid)):
                 os.remove(os.path.join(contentdir, fn_uuid))
     print("Purged %d objects" % len(to_purge))
Example #3: do_post_object
 def do_post_object(self, account_name, container_name, object_name):
     """Handle object-level POST operations."""
     res = self.conn.execute(sql.select([
         sql.objects.c.uuid,
         sql.objects.c.expires
     ]).where(
         sql.objects.c.account == account_name
     ).where(
         sql.objects.c.container == container_name
     ).where(
         sql.objects.c.name == object_name
     ).where(
         sql.objects.c.deleted == False
     )).fetchone()
     if not res:
         self.send_error(httplib.NOT_FOUND)
         return
     (fn_uuid, expires) = res
     if expires and expires <= time.time():
         self.send_error(httplib.NOT_FOUND)
         return
     user_meta = {}
     for header in self.http.headers:
         if header.lower().startswith('x-object-meta-'):
             user_meta[header[14:]] = self.http.headers[header]
     last_modified = time.time()
     with self.conn.begin():
         self.conn.execute(sql.objects.update().where(
             sql.objects.c.uuid == fn_uuid
         ).values(
             user_meta=json.dumps(user_meta),
             last_modified=last_modified
         ))
     self.http.send_response(httplib.NO_CONTENT)
     self.http.end_headers()
Example #4: do_get_container
 def do_get_container(self, account_name, container_name):
     """Handle container-level GET operations."""
     (objects,) = self.conn.execute(sql.select([
         sql.count('*'),
     ]).where(
         sql.objects.c.account == account_name
     ).where(
         sql.objects.c.container == container_name
     ).where(
         sql.objects.c.deleted == False
     )).fetchone()
     if objects == 0:
         self.send_error(httplib.NOT_FOUND)
         return
     s = sql.select([
         sql.objects.c.name,
         sql.objects.c.bytes,
         sql.objects.c.last_modified,
         sql.objects.c.expires,
         sql.objects.c.meta
     ]).where(
         sql.objects.c.account == account_name
     ).where(
         sql.objects.c.container == container_name
     ).where(
         sql.objects.c.deleted == False
     ).order_by(
         sql.objects.c.name
     )
     s = self.apply_list_query_params(s, sql.objects.c.name)
     out = []
     for (name, bytes, last_modified, expires, meta) in self.conn.execute(s):
         if expires and expires <= time.time():
             continue
         if meta:
             meta = json.loads(meta)
         else:
             meta = {}
         content_type = 'application/octet-stream'
         if 'content_type' in meta:
             content_type = meta['content_type']
         out.append({'name': name, 'hash': meta['hash'], 'bytes': int(bytes), 'last_modified': float(last_modified), 'content_type': content_type})
     self.output_file_list(out, 'container', container_name, 'object')
Example #5: authenticate_token
 def authenticate_token(self, user_token):
     res = self.conn.execute(sql.select([
         sql.tokens_cache.c.account
     ]).where(
         sql.tokens_cache.c.id == user_token
     ).where(
         sql.tokens_cache.c.expires > time.time()
     )).fetchone()
     if not res:
         return False
     (token_account,) = res
     return token_account
Example #6: do_head_account
 def do_head_account(self, account_name):
     """Handle account-level HEAD operations."""
     (objects, containers, bytes) = self.conn.execute(sql.select([
         sql.count('*'),
         sql.count(sql.distinct(sql.objects.c.container)),
         sql.sum(sql.objects.c.bytes)
     ]).where(
         sql.objects.c.account == account_name
     ).where(
         sql.objects.c.deleted == False
     )).fetchone()
     self.http.send_response(httplib.NO_CONTENT)
     self.http.send_header('X-Account-Container-Count', containers)
     self.http.send_header('X-Account-Bytes-Used', bytes)
     self.http.send_header('X-Account-Object-Count', objects)
     self.http.end_headers()
Example #7: do_delete_container
 def do_delete_container(self, account_name, container_name):
     """Handle container-level DELETE operations."""
     (objects,) = self.conn.execute(sql.select([
         sql.count('*')
     ]).where(
         sql.objects.c.account == account_name
     ).where(
         sql.objects.c.container == container_name
     ).where(
         sql.objects.c.deleted == False
     )).fetchone()
     if objects > 0:
         self.send_error(httplib.CONFLICT)
         return
     self.http.send_response(httplib.NO_CONTENT)
     self.http.end_headers()
Example #8: process_request
 def process_request(self, reqpath):
     """Process Version 1.0 TempAuth commands."""
     r_fn = reqpath.strip('/').split('/')
     if not r_fn[0] == 'v1.0':
         return False
     if not self.http.server.config['auth_tempauth']['storage_url']:
         return False
     if len(r_fn) > 1:
         self.http.send_error(httplib.BAD_REQUEST)
         return True
     if not 'x-auth-user' in self.http.headers:
         self.http.send_error(httplib.BAD_REQUEST)
         return True
     username = self.http.headers['x-auth-user']
     password = self.http.headers['x-auth-key']
     res = self.conn.execute(sql.select([
         sql.tempauth_users.c.account,
         sql.tempauth_users.c.password
     ]).where(
         sql.tempauth_users.c.username == username
     )).fetchone()
     if not res:
         self.http.send_error(httplib.UNAUTHORIZED)
         return True
     (account_name, password_crypt) = res
     if not unladen.utils.passwords.check_password(password_crypt, password):
         self.http.send_error(httplib.UNAUTHORIZED)
         return True
     token = str(uuid.uuid4())
     expires = int(time.time() + 86400)
     # Since this is a local provider, we cheat a bit and just add
     # the token directly to tokens_cache.
     with self.conn.begin():
         self.conn.execute(sql.tokens_cache.insert().values(
             id=token,
             account=account_name,
             expires=expires,
             source='auth_tempauth'
         ))
     self.http.send_response(httplib.NO_CONTENT)
     storage_url = self.http.server.config['auth_tempauth']['storage_url']
     self.http.send_header('X-Storage-Url', '%s/%s' % (storage_url, account_name))
     self.http.send_header('X-Unladen-Base-Url', storage_url)
     self.http.send_header('X-Auth-Token', token)
     self.http.end_headers()
     return True
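For reference, the handshake this handler implements can be driven with the same httplib calls the cluster code uses in peers_maint(): send X-Auth-User and X-Auth-Key to /v1.0 and read the token and storage URL from the response headers. A hedged client-side sketch; the host, username, and password below are placeholders, not values from the original.

    import httplib

    h = httplib.HTTPConnection("localhost:8080", timeout=5)
    h.putrequest("GET", "/v1.0")
    h.putheader("X-Auth-User", "alice")
    h.putheader("X-Auth-Key", "secret")
    h.endheaders()
    res = h.getresponse()                      # 204 No Content on success
    token = res.getheader("x-auth-token")
    storage_url = res.getheader("x-storage-url")
    # Later requests send the token back, e.g. h.putheader("X-Auth-Token", token)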
Example #9: do_head_container
 def do_head_container(self, account_name, container_name):
     """Handle container-level HEAD operations."""
     (objects, bytes) = self.conn.execute(sql.select([
         sql.count('*'),
         sql.sum(sql.objects.c.bytes)
     ]).where(
         sql.objects.c.account == account_name
     ).where(
         sql.objects.c.container == container_name
     ).where(
         sql.objects.c.deleted == False
     )).fetchone()
     if objects == 0:
         self.send_error(httplib.NOT_FOUND)
         return
     self.http.send_response(httplib.NO_CONTENT)
     self.http.send_header('X-Container-Bytes-Used', bytes)
     self.http.send_header('X-Container-Object-Count', objects)
     self.http.end_headers()
Example #10: do_get_account
 def do_get_account(self, account_name):
     """Handle account-level GET operations."""
     s = sql.select([
         sql.objects.c.container,
         sql.count('*'),
         sql.sum(sql.objects.c.bytes)
     ]).where(
         sql.objects.c.account == account_name
     ).where(
         sql.objects.c.deleted == False
     ).group_by(
         sql.objects.c.container
     ).order_by(
         sql.objects.c.container
     )
     s = self.apply_list_query_params(s, sql.objects.c.container)
     out = []
     for (container_name, count, bytes) in self.conn.execute(s):
         out.append({'name': container_name, 'count': int(count), 'bytes': int(bytes)})
     self.output_file_list(out, 'account', account_name, 'container')
Example #11: do_head_file_container
 def do_head_file_container(self):
     """Handle file container-level HEAD operations."""
     if len(self.http.server.config['stores']) == 0:
         self.send_error(httplib.BAD_REQUEST)
         return
     (files, bytes) = self.conn.execute(sql.select([
         sql.count('*'),
         sql.sum(sql.files.c.bytes_disk)
     ]).where(
         sql.files.c.uploader == self.authenticated_account
     )).fetchone()
     if not bytes:
         bytes = 0
     total_config_bytes = 0
     for store in self.http.server.config['stores']:
         total_config_bytes = total_config_bytes + self.http.server.config['stores'][store]['size']
     self.http.send_response(httplib.NO_CONTENT)
     self.http.send_header('X-Container-Bytes-Used', bytes)
     self.http.send_header('X-Container-Object-Count', files)
     self.http.send_header('X-Unladen-Node-Capacity', total_config_bytes)
     self.http.end_headers()
Example #12: do_delete_file
 def do_delete_file(self, fn_uuid):
     """Handle file-level DELETE operations."""
     if len(self.http.server.config['stores']) == 0:
         self.send_error(httplib.BAD_REQUEST)
         return
     res = self.conn.execute(sql.select([
         sql.files.c.store
     ]).where(
         sql.files.c.uuid == fn_uuid
     )).fetchone()
     if not res:
         self.send_error(httplib.NOT_FOUND)
         return
     (store,) = res
     store_dir = self.http.server.config['stores'][store]['directory']
     with self.conn.begin():
         self.conn.execute(sql.files.delete().where(
             sql.files.c.uuid == fn_uuid
         ))
     os.remove(os.path.join(store_dir, fn_uuid[0:2], fn_uuid[2:4], fn_uuid))
     self.http.send_response(httplib.NO_CONTENT)
     self.http.end_headers()
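The path removed here follows the layout used throughout these examples: files are sharded into two directory levels taken from the first four characters of their UUID. A small sketch of that layout; the store directory is a placeholder, not a path from the original configuration.

    import os
    import uuid

    fn_uuid = str(uuid.uuid4())            # e.g. "3f2a9c1e-..."
    store_dir = "/srv/unladen/store0"      # placeholder, not from the original config
    path = os.path.join(store_dir, fn_uuid[0:2], fn_uuid[2:4], fn_uuid)
    print(path)                            # e.g. /srv/unladen/store0/3f/2a/3f2a9c1e-...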
Example #13: do_get_file_container
 def do_get_file_container(self):
     """Handle file container-level GET operations."""
     s = sql.select([
         sql.files.c.uuid,
         sql.files.c.bytes_disk,
         sql.files.c.created,
         sql.files.c.meta
     ]).where(
         sql.files.c.uploader == self.authenticated_account
     ).order_by(
         sql.files.c.uuid
     )
     s = self.apply_list_query_params(s, sql.files.c.uuid)
     out = []
     for (fn_uuid, bytes, last_modified, meta) in self.conn.execute(s):
         if meta:
             meta = json.loads(meta)
         else:
             meta = {}
         content_type = 'application/octet-stream'
         out.append({'name': fn_uuid, 'hash': meta['hash'], 'bytes': int(bytes), 'last_modified': float(last_modified), 'content_type': content_type})
     # Note: container_name is not defined anywhere in this function in the
     # original source; a fixed label for the node-local file container was
     # probably intended here.
     self.output_file_list(out, 'container', container_name, 'object')
Example #14: do_get_file
 def do_get_file(self, fn_uuid):
     """Handle file-level GET operations."""
     if len(self.http.server.config['stores']) == 0:
         self.send_error(httplib.BAD_REQUEST)
         return
     if not fn_uuid:
         self.send_error(httplib.BAD_REQUEST)
         return
     res = self.conn.execute(sql.select([
         sql.files.c.bytes_disk,
         sql.files.c.store,
         sql.files.c.created,
         sql.files.c.meta
     ]).where(
         sql.files.c.uuid == fn_uuid
     )).fetchone()
     if not res:
         self.send_error(httplib.NOT_FOUND)
         return
     (length, store, last_modified, meta) = res
     meta = json.loads(meta)
     store_dir = self.http.server.config['stores'][store]['directory']
     self.conn.close()
     self.http.send_response(httplib.OK)
     self.http.send_header('Content-Type', 'application/octet-stream')
     self.http.send_header('Content-Length', length)
     self.http.send_header('Last-Modified', self.http.date_time_string(last_modified))
     self.http.send_header('X-Timestamp', last_modified)
     self.http.send_header('ETag', meta['hash'])
     self.http.end_headers()
     if self.http.command == 'HEAD':
         return
     with open(os.path.join(store_dir, fn_uuid[0:2], fn_uuid[2:4], fn_uuid), 'rb') as r:
         blk = r.read(1024)
         while blk:
             self.http.wfile.write(blk)
             blk = r.read(1024)
Example #15: peers_maint
 def peers_maint(self):
     self.cluster_peers_cache = {}
     sql_peers = {}
     todelete = []
     for (peer, peer_updated, storage_url, token, token_expires, total_size, used_size) in self.conn.execute(
         sql.select(
             [
                 sql.cluster_peers.c.peer,
                 sql.cluster_peers.c.peer_updated,
                 sql.cluster_peers.c.storage_url,
                 sql.cluster_peers.c.token,
                 sql.cluster_peers.c.token_expires,
                 sql.cluster_peers.c.total_size,
                 sql.cluster_peers.c.used_size,
             ]
         )
     ):
         if peer not in self.config["peers"]:
             todelete.append(peer)
             continue
         sql_peers[peer] = (peer_updated, storage_url, token, token_expires, total_size, used_size)
     for peer in todelete:
         self.conn.execute(sql.cluster_peers.delete().where(sql.cluster_peers.c.peer == peer))
     for peer in self.config["peers"]:
         if peer in sql_peers:
             (peer_updated, storage_url, token, token_expires, total_size, used_size) = sql_peers[peer]
         else:
             (peer_updated, storage_url, token, token_expires, total_size, used_size) = (0, None, None, 0, 0, 0)
         now = int(time.time())
         # Decide whether to re-authenticate against the peer. (Note: as written
         # this compares the current time to half the token lifetime alone, so in
         # practice a fresh token is requested on every pass.)
         if time.time() > ((token_expires - peer_updated) / 2):
             url = urlparse.urlparse(self.config["peers"][peer]["auth"]["url"])
             if url.scheme == "https":
                 h = httplib.HTTPSConnection(url.netloc, timeout=5)
             else:
                 h = httplib.HTTPConnection(url.netloc, timeout=5)
             h.putrequest("GET", url.path)
             h.putheader("X-Auth-User", self.config["peers"][peer]["auth"]["username"])
             h.putheader("X-Auth-Key", self.config["peers"][peer]["auth"]["password"])
             h.endheaders()
             res = h.getresponse()
             print(res.getheaders())
             token = res.getheader("x-auth-token")
             token_expires = now + 86400
             storage_url = res.getheader("x-storage-url")
         url = urlparse.urlparse(storage_url)
         if url.scheme == "https":
             h = httplib.HTTPSConnection(url.netloc, timeout=5)
         else:
             h = httplib.HTTPConnection(url.netloc, timeout=5)
         h.putrequest("HEAD", "%s/%s" % (url.path, "808f1b75-a011-4ea7-82a5-e6aad1092fea"))
         h.putheader("X-Auth-Token", token)
         h.endheaders()
         res = h.getresponse()
         print(res.getheaders())
         total_size = int(res.getheader("x-unladen-node-capacity"))
         used_size = int(res.getheader("x-container-bytes-used"))
         if peer in sql_peers:
             self.conn.execute(
                 sql.cluster_peers.update()
                 .where(sql.cluster_peers.c.peer == peer)
                 .values(
                     peer_updated=now,
                     storage_url=storage_url,
                     token=token,
                     token_expires=token_expires,
                     total_size=total_size,
                     used_size=used_size,
                 )
             )
         else:
             self.conn.execute(
                 sql.cluster_peers.insert().values(
                     peer=peer,
                     peer_updated=now,
                     storage_url=storage_url,
                     token=token,
                     token_expires=token_expires,
                     total_size=total_size,
                     used_size=used_size,
                 )
             )
         self.cluster_peers_cache[peer] = (peer_updated, storage_url, token, token_expires, total_size, used_size)
Example #16: check_store_balance
 def check_store_balance(self):
     store_stats = {}
     total_bytes = 0
     total_objects = 0
     for (store, objects, bytes) in self.conn.execute(
         sql.select([sql.files.c.store, sql.count("*"), sql.sum(sql.files.c.bytes_disk)]).group_by(sql.files.c.store)
     ):
         total_bytes = total_bytes + bytes
         total_objects = total_objects + objects
         store_stats[store] = (objects, bytes)
     if total_objects == 0:
         return
     total_config_bytes = 0
     for store in self.config["stores"]:
         total_config_bytes = total_config_bytes + self.config["stores"][store]["size"]
     rebalance_stores = False
     total_transfer_out = 0
     total_transfer_in = 0
     transfer_d = {}
     for store in sorted(self.config["stores"]):
         if store in store_stats:
             (objects, bytes) = store_stats[store]
         else:
             (objects, bytes) = (0, 0)
         objects_pct = float(objects) / float(total_objects)
         bytes_pct = float(bytes) / float(total_bytes)
         config_pct = float(self.config["stores"][store]["size"]) / float(total_config_bytes)
         config_pct_delta = bytes_pct - config_pct
         print(
             "%s: %d objects (%0.02f%%), %d bytes (%0.02f%%, %0.02f%% from config)"
             % (store, objects, objects_pct * 100.0, bytes, bytes_pct * 100.0, config_pct_delta * 100.0)
         )
         should_have = int(float(total_bytes) * config_pct)
         print("    Should have %d bytes" % should_have)
         transfer = bytes - should_have
         transfer_d[store] = transfer
         if transfer > 0:
             print("    Transfer %d bytes out" % abs(transfer))
             total_transfer_out = total_transfer_out + abs(transfer)
         else:
             print("    Transfer %d bytes in" % abs(transfer))
             total_transfer_in = total_transfer_in + abs(transfer)
         if abs(config_pct_delta) > 0.01:
             rebalance_stores = True
     if rebalance_stores:
         print("Time to rebalance the stores")
     else:
         print("Stores are sufficiently balanced")
         return
     transfer_orders = []
     for store_from in transfer_d:
         if transfer_d[store_from] < 0:
             continue
         stores_transfer_to = {}
         for store_to in transfer_d:
             if transfer_d[store_to] > 0:
                 continue
             x = int(float(abs(transfer_d[store_to])) / float(total_transfer_in) * float(transfer_d[store_from]))
             print("Transfer %d bytes from %s to %s" % (x, store_from, store_to))
             if x > 0:
                 stores_transfer_to[store_to] = x
         bytes_left = x
         res = self.conn.execute(
             sql.select([sql.files.c.uuid, sql.files.c.bytes_disk])
             .where(sql.files.c.store == store_from)
             .order_by(sql.desc(sql.files.c.bytes_disk))
         )
         for (fn_uuid, bytes) in res:
             store_to = None
             bytes_left = 0
             for store_to_candidate in stores_transfer_to:
                 if float(bytes) / float(stores_transfer_to[store_to_candidate]) < 1.05:
                     store_to = store_to_candidate
                     bytes_left = stores_transfer_to[store_to_candidate]
                     break
             if not store_to:
                 continue
             print("Move %s (%d) from %s to %s" % (fn_uuid, bytes, store_from, store_to))
             transfer_orders.append((fn_uuid, store_from, store_to))
             bytes_left = bytes_left - bytes
             if bytes_left <= 0:
                 del (stores_transfer_to[store_to])
             else:
                 stores_transfer_to[store_to] = bytes_left
             if len(stores_transfer_to) == 0:
                 res.close()
                 break
     print("")
     print("")
     random.shuffle(transfer_orders)
     for (fn_uuid, store_from, store_to) in transfer_orders:
         print("%s %s %s" % (fn_uuid, store_from, store_to))
         store_dir_from = self.config["stores"][store_from]["directory"]
         contentdir_from = os.path.join(store_dir_from, fn_uuid[0:2], fn_uuid[2:4])
         store_dir_to = self.config["stores"][store_to]["directory"]
         contentdir_to = os.path.join(store_dir_to, fn_uuid[0:2], fn_uuid[2:4])
         if not os.path.isdir(contentdir_to):
             os.makedirs(contentdir_to)
         shutil.copy(os.path.join(contentdir_from, fn_uuid), os.path.join(contentdir_to, fn_uuid))
         self.conn.execute(sql.files.update().where(sql.files.c.uuid == fn_uuid).values(store=store_to))
         os.remove(os.path.join(contentdir_from, fn_uuid))
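The target computed above is plain proportional allocation: a store's fair share of the stored bytes is its fraction of the total configured capacity, and anything above that share is marked for transfer out. A toy calculation (all numbers invented) mirroring the should_have and transfer lines:

    stores = {"a": 100 * 2**30, "b": 300 * 2**30}    # configured sizes in bytes
    usage = {"a": 60 * 2**30, "b": 40 * 2**30}       # bytes currently on disk
    total_config = sum(stores.values())
    total_bytes = sum(usage.values())
    for store in sorted(stores):
        config_pct = float(stores[store]) / total_config
        should_have = int(total_bytes * config_pct)
        transfer = usage[store] - should_have        # > 0: move out, < 0: room to take in
        print("%s: has %d, should have %d, transfer %d" % (store, usage[store], should_have, transfer))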
Example #17: object_replication
    def object_replication(self):
        peer_weights = {}
        for peer in self.cluster_peers_cache:
            (peer_updated, storage_url, token, token_expires, total_size, used_size) = self.cluster_peers_cache[peer]
            peer_weights[peer] = total_size
        toadd = []
        todel = []
        for (fn_uuid, meta) in self.conn.execute(
            sql.select([sql.objects.c.uuid, sql.objects.c.meta]).where(sql.objects.c.deleted == False)
        ):
            meta = json.loads(meta)
            existing_peers = []
            for peer in meta["disk_peers"]:
                if peer in self.config["peers"]:
                    existing_peers.append(peer)
            new_peers = self.choose_peers(existing_peers, peer_weights, 3.0)
            for peer in new_peers:
                if not peer in existing_peers:
                    toadd.append((fn_uuid, peer, existing_peers, meta["disk_bytes"], meta["disk_hash"]))
            for peer in existing_peers:
                if not peer in new_peers:
                    todel.append((fn_uuid, peer))

        staging_delete = []
        if len(toadd) > 0:
            print("Starting threads...")
            for (fn_uuid, peer, existing_peers, bytes_disk, md5_hash) in toadd:
                self.sema.acquire(blocking=True)
                t = threading.Thread(
                    target=self.replication_add_thread, args=(fn_uuid, peer, existing_peers, bytes_disk, md5_hash)
                )
                t.start()
            print("Waiting for threads to finish...")
            while threading.active_count() > 1:
                time.sleep(0.5)
            print("All threads done.")
            for (fn_uuid, peer, existing_peers, bytes_disk, md5_hash) in toadd:
                (meta,) = self.conn.execute(
                    sql.select([sql.objects.c.meta]).where(sql.objects.c.uuid == fn_uuid)
                ).fetchone()
                meta = json.loads(meta)
                meta["disk_peers"].append(peer)
                self.conn.execute(
                    sql.objects.update().where(sql.objects.c.uuid == fn_uuid).values(meta=json.dumps(meta))
                )
                if not fn_uuid in staging_delete:
                    staging_delete.append(fn_uuid)

        if len(todel) > 0:
            print("Starting threads...")
            for (fn_uuid, peer) in todel:
                if not peer in self.config["peers"]:
                    continue
                self.sema.acquire(blocking=True)
                t = threading.Thread(target=self.replication_del_thread, args=(fn_uuid, peer))
                t.start()
            print("Waiting for threads to finish...")
            while threading.active_count() > 1:
                time.sleep(0.5)
            print("All threads done.")
            for (fn_uuid, peer) in todel:
                (meta,) = self.conn.execute(
                    sql.select([sql.objects.c.meta]).where(sql.objects.c.uuid == fn_uuid)
                ).fetchone()
                meta = json.loads(meta)
                meta["disk_peers"].remove(peer)
                self.conn.execute(
                    sql.objects.update().where(sql.objects.c.uuid == fn_uuid).values(meta=json.dumps(meta))
                )

        for fn_uuid in staging_delete:
            contentdir = os.path.join(self.config["staging_files_dir"], fn_uuid[0:2], fn_uuid[2:4])
            if os.path.isfile(os.path.join(contentdir, fn_uuid)):
                os.remove(os.path.join(contentdir, fn_uuid))
Example #18: do_put_file
 def do_put_file(self, fn_uuid):
     """Handle file-level PUT operations."""
     if len(self.http.server.config['stores']) == 0:
         self.send_error(httplib.BAD_REQUEST)
         return
     if not fn_uuid:
         self.send_error(httplib.BAD_REQUEST)
         return
     if not 'content-length' in self.http.headers:
         self.send_error(httplib.LENGTH_REQUIRED)
         return
     length = int(self.http.headers['content-length'])
     try:
         uuid.UUID(fn_uuid)
     except ValueError:
         self.send_error(httplib.BAD_REQUEST)
         return
     res = self.conn.execute(sql.select([
         sql.files.c.store
     ]).where(
         sql.files.c.uuid == fn_uuid
     )).fetchone()
     if res:
         self.send_error(httplib.CONFLICT)
         return
     self.conn.close()
     now = time.time()
     meta_file = {}
     store = self.choose_store()
     store_dir = self.http.server.config['stores'][store]['directory']
     contentdir = os.path.join(store_dir, fn_uuid[0:2], fn_uuid[2:4])
     if not os.path.isdir(contentdir):
         os.makedirs(contentdir)
     m_file = hashlib.md5()
     bytes_disk = 0
     with open(os.path.join(contentdir, '%s.new' % fn_uuid), 'wb') as w:
         bytesread = 0
         toread = 1024
         if (bytesread + toread) > length:
             toread = length - bytesread
         blk = self.http.rfile.read(toread)
         bytesread = bytesread + len(blk)
         while blk:
             m_file.update(blk)
             w.write(blk)
             bytes_disk = bytes_disk + len(blk)
             toread = 1024
             if (bytesread + toread) > length:
                 toread = length - bytesread
             blk = self.http.rfile.read(toread)
             bytesread = bytesread + len(blk)
     md5_hash_file = m_file.hexdigest()
     if 'etag' in self.http.headers:
         if not self.http.headers['etag'].lower() == md5_hash_file:
             os.remove(os.path.join(contentdir, '%s.new' % fn_uuid))
             self.send_error(httplib.CONFLICT)
             return
     shutil.move(os.path.join(contentdir, '%s.new' % fn_uuid), os.path.join(contentdir, fn_uuid))
     meta_file['hash'] = md5_hash_file
     with self.conn.begin():
         self.conn.execute(sql.files.insert().values(
             uuid=fn_uuid,
             bytes_disk=bytes_disk,
             store=store,
             uploader=self.authenticated_account,
             created=now,
             meta=json.dumps(meta_file)
         ))
     self.http.send_response(httplib.CREATED)
     self.http.send_header('Content-Length', 0)
     self.http.send_header('ETag', md5_hash_file)
     self.http.end_headers()
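The upload loop reads exactly Content-Length bytes from the request stream in chunks of at most 1 KiB, hashing as it goes, and only then compares the digest with the client's ETag. A compact, self-contained restatement of that loop; read_body and the BytesIO example are illustrative, not part of the original.

    import hashlib
    import io

    def read_body(stream, length, chunk=1024):
        m = hashlib.md5()
        bytesread = 0
        while bytesread < length:
            blk = stream.read(min(chunk, length - bytesread))
            if not blk:
                break                    # client closed the connection early
            m.update(blk)
            bytesread = bytesread + len(blk)
        return bytesread, m.hexdigest()  # digest is compared against the ETag header

    n, digest = read_body(io.BytesIO(b"hello world"), 11)
    print(n, digest)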
Example #19: do_put_object
 def do_put_object(self, account_name, container_name, object_name):
     """Handle object-level PUT operations."""
     if not 'content-length' in self.http.headers:
         self.send_error(httplib.LENGTH_REQUIRED)
         return
     length = int(self.http.headers['content-length'])
     last_modified = time.time()
     if 'x-unladen-uuid' in self.http.headers:
         try:
             fn_uuid = str(uuid.UUID(self.http.headers['x-unladen-uuid']))
         except ValueError:
             self.send_error(httplib.BAD_REQUEST)
             return
     else:
         fn_uuid = str(uuid.uuid4())
     if 'x-unladen-aes-key' in self.http.headers:
         try:
             aes_key = codecs.getdecoder("hex_codec")(self.http.headers['x-unladen-aes-key'])[0]
         except TypeError:
             self.send_error(httplib.BAD_REQUEST)
             return
         if not len(aes_key) == 32:
             self.send_error(httplib.BAD_REQUEST)
             return
     else:
         aes_key = os.urandom(32)
     meta = {}
     meta['aes_key'] = codecs.getencoder("hex_codec")(aes_key)[0]
     if 'x-detect-content-type' in self.http.headers and self.http.headers['x-detect-content-type'] == 'true':
         (content_type_guess, content_encoding_guess) = mimetypes.guess_type(object_name)
         if content_type_guess:
             meta['content_type'] = content_type_guess
         if content_encoding_guess:
             meta['content_encoding'] = content_encoding_guess
     else:
         if 'content-type' in self.http.headers:
             meta['content_type'] = self.http.headers['content-type']
         if 'content-encoding' in self.http.headers:
             meta['content_encoding'] = self.http.headers['content-encoding']
     if 'content-disposition' in self.http.headers:
         meta['content_disposition'] = self.http.headers['content-disposition']
     expires = None
     if 'x-delete-at' in self.http.headers:
         expires = int(self.http.headers['x-delete-at'])
     elif 'x-delete-after' in self.http.headers:
         expires = last_modified + int(self.http.headers['x-delete-after'])
     user_meta = {}
     for header in self.http.headers:
         if header.lower().startswith('x-object-meta-'):
             user_meta[header[14:]] = self.http.headers[header]
     contentdir = os.path.join(self.http.server.config['staging_files_dir'], fn_uuid[0:2], fn_uuid[2:4])
     if not os.path.isdir(contentdir):
         os.makedirs(contentdir)
     block_size = Crypto.Cipher.AES.block_size
     iv = os.urandom(block_size)
     cipher = Crypto.Cipher.AES.new(aes_key, Crypto.Cipher.AES.MODE_CFB, iv)
     m = hashlib.md5()
     m_file = hashlib.md5()
     bytes_disk = 0
     with open(os.path.join(contentdir, '%s.new' % fn_uuid), 'wb') as w:
         m_file.update(iv)
         w.write(iv)
         bytes_disk = bytes_disk + len(iv)
         bytesread = 0
         toread = 1024
         if (bytesread + toread) > length:
             toread = length - bytesread
         blk = self.http.rfile.read(toread)
         bytesread = bytesread + len(blk)
         while blk:
             m.update(blk)
             blk_encrypted = cipher.encrypt(blk)
             m_file.update(blk_encrypted)
             w.write(blk_encrypted)
             bytes_disk = bytes_disk + len(blk_encrypted)
             toread = 1024
             if (bytesread + toread) > length:
                 toread = length - bytesread
             blk = self.http.rfile.read(toread)
             bytesread = bytesread + len(blk)
     md5_hash = m.hexdigest()
     md5_hash_file = m_file.hexdigest()
     if 'etag' in self.http.headers:
         if not self.http.headers['etag'].lower() == md5_hash:
             os.remove(os.path.join(contentdir, '%s.new' % fn_uuid))
             self.send_error(httplib.CONFLICT)
             return
     shutil.move(os.path.join(contentdir, '%s.new' % fn_uuid), os.path.join(contentdir, fn_uuid))
     meta['hash'] = md5_hash
     meta['disk_hash'] = md5_hash_file
     meta['disk_bytes'] = bytes_disk
     meta['disk_peers'] = []
     res = self.conn.execute(sql.select([
         sql.objects.c.uuid
     ]).where(
         sql.objects.c.account == account_name
     ).where(
         sql.objects.c.container == container_name
     ).where(
         sql.objects.c.name == object_name
     ).where(
         sql.objects.c.deleted == False
     )).fetchone()
     with self.conn.begin():
         if res:
             (old_fn_uuid,) = res
             self.conn.execute(sql.objects.update().where(
                 sql.objects.c.uuid == old_fn_uuid
             ).values(
                 deleted=True
             ))
         self.conn.execute(sql.objects.insert().values(
             uuid=fn_uuid,
             deleted=False,
             account=account_name,
             container=container_name,
             name=object_name,
             bytes=length,
             last_modified=last_modified,
             expires=expires,
             meta=json.dumps(meta),
             user_meta=json.dumps(user_meta)
         ))
     self.http.send_response(httplib.CREATED)
     if 'content_type' in meta:
         self.http.send_header('Content-Type', meta['content_type'].encode('utf-8'))
     else:
         self.http.send_header('Content-Type', 'application/octet-stream')
     if 'content_encoding' in meta:
         self.http.send_header('Content-Encoding', meta['content_encoding'].encode('utf-8'))
     if 'content_disposition' in meta:
         self.http.send_header('Content-Disposition', meta['content_disposition'].encode('utf-8'))
     self.http.send_header('Content-Length', 0)
     self.http.send_header('ETag', md5_hash)
     self.http.end_headers()
Example #20: do_get_object
 def do_get_object(self, account_name, container_name, object_name):
     """Handle object-level GET operations."""
     res = self.conn.execute(sql.select([
         sql.objects.c.uuid,
         sql.objects.c.bytes,
         sql.objects.c.meta,
         sql.objects.c.last_modified,
         sql.objects.c.expires,
         sql.objects.c.user_meta
     ]).where(
         sql.objects.c.account == account_name
     ).where(
         sql.objects.c.container == container_name
     ).where(
         sql.objects.c.name == object_name
     ).where(
         sql.objects.c.deleted == False
     )).fetchone()
     if not res:
         self.send_error(httplib.NOT_FOUND)
         return
     (fn_uuid, length, meta, last_modified, expires, user_meta) = res
     if expires and expires <= time.time():
         self.send_error(httplib.NOT_FOUND)
         return
     if meta:
         meta = json.loads(meta)
     else:
         meta = {}
     if user_meta:
         user_meta = json.loads(user_meta)
     else:
         user_meta = {}
     aes_key = codecs.getdecoder("hex_codec")(meta['aes_key'])[0]
     self.http.send_response(httplib.OK)
     if 'content_type' in meta:
         self.http.send_header('Content-Type', meta['content_type'].encode('utf-8'))
     else:
         self.http.send_header('Content-Type', 'application/octet-stream')
     if 'content_encoding' in meta:
         self.http.send_header('Content-Encoding', meta['content_encoding'].encode('utf-8'))
     if 'content_disposition' in meta:
         self.http.send_header('Content-Disposition', meta['content_disposition'].encode('utf-8'))
     self.http.send_header('Content-Length', length)
     self.http.send_header('Last-Modified', self.http.date_time_string(last_modified))
     self.http.send_header('X-Timestamp', last_modified)
     if expires:
         self.http.send_header('X-Delete-At', expires)
     self.http.send_header('ETag', meta['hash'])
     for header in user_meta:
         self.http.send_header(('X-Object-Meta-%s' % header).encode('utf-8'), user_meta[header].encode('utf-8'))
     self.http.end_headers()
     if self.http.command == 'HEAD':
         return
     block_size = Crypto.Cipher.AES.block_size
     cipher = None
     peer = None
     if len(meta['disk_peers']) > 0:
         peer = random.choice(meta['disk_peers'])
     if peer:
         (peer_storage_url, peer_token) = self.conn.execute(sql.select([
             sql.cluster_peers.c.storage_url,
             sql.cluster_peers.c.token
         ]).where(
             sql.cluster_peers.c.peer == peer
         )).fetchone()
         peer_url = urlparse.urlparse(peer_storage_url)
         if peer_url.scheme == 'https':
             h = httplib.HTTPSConnection(peer_url.netloc, timeout=5)
         else:
             h = httplib.HTTPConnection(peer_url.netloc, timeout=5)
         h.putrequest('GET', '%s/%s/%s' % (peer_url.path, '808f1b75-a011-4ea7-82a5-e6aad1092fea', fn_uuid))
         h.putheader('X-Auth-Token', peer_token)
         h.endheaders()
         r = h.getresponse()
     else:
         contentdir = os.path.join(self.http.server.config['staging_files_dir'], fn_uuid[0:2], fn_uuid[2:4])
         r = open(os.path.join(contentdir, fn_uuid), 'rb')
     self.conn.close()
     if not cipher:
         iv = r.read(block_size)
         cipher = Crypto.Cipher.AES.new(aes_key, Crypto.Cipher.AES.MODE_CFB, iv)
     bytesread = 0
     blk = r.read(1024)
     bytesread = bytesread + len(blk)
     while blk:
         buf = cipher.decrypt(blk)
         self.http.wfile.write(buf)
         blk = r.read(1024)
         bytesread = bytesread + len(blk)
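Taken together with do_put_object above, this read path fixes the on-disk format: a random IV followed by the AES-CFB ciphertext, with the MD5 of the plaintext exposed as the ETag and the MD5 of the bytes on disk kept as disk_hash. A self-contained round trip under those assumptions; the sample data is invented.

    import hashlib
    import os
    import Crypto.Cipher.AES

    aes_key = os.urandom(32)
    plaintext = b"example object body"

    iv = os.urandom(Crypto.Cipher.AES.block_size)
    enc = Crypto.Cipher.AES.new(aes_key, Crypto.Cipher.AES.MODE_CFB, iv)
    blob = iv + enc.encrypt(plaintext)              # what do_put_object stages to disk
    etag = hashlib.md5(plaintext).hexdigest()       # returned in the ETag header
    disk_hash = hashlib.md5(blob).hexdigest()       # stored as meta['disk_hash']

    # do_get_object reads the IV back first, then decrypts the rest of the stream.
    bs = Crypto.Cipher.AES.block_size
    dec = Crypto.Cipher.AES.new(aes_key, Crypto.Cipher.AES.MODE_CFB, blob[:bs])
    assert dec.decrypt(blob[bs:]) == plaintext
    print(etag, disk_hash)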