def do_head_account(self, account_name):
    """Handle account-level HEAD operations."""
    (objects, containers, bytes) = self.conn.execute(sql.select([
        sql.count('*'),
        sql.count(sql.distinct(sql.objects.c.container)),
        sql.sum(sql.objects.c.bytes)
    ]).where(
        sql.objects.c.account == account_name
    ).where(
        sql.objects.c.deleted == False
    )).fetchone()
    self.http.send_response(httplib.NO_CONTENT)
    self.http.send_header('X-Account-Container-Count', containers)
    self.http.send_header('X-Account-Bytes-Used', bytes)
    self.http.send_header('X-Account-Object-Count', objects)
    self.http.end_headers()
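# Illustrative client sketch only (not part of this handler; the port and the
# request path below are assumptions, since URL routing is defined elsewhere):
# an account HEAD returns 204 No Content with the three X-Account-* headers
# set above, which a client can read like this:
#
#   conn = httplib.HTTPConnection('localhost', 8080)
#   conn.request('HEAD', '/v1/exampleaccount')  # hypothetical path
#   resp = conn.getresponse()
#   print(resp.getheader('X-Account-Object-Count'))
#   print(resp.getheader('X-Account-Bytes-Used'))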
def do_delete_container(self, account_name, container_name):
    """Handle container-level DELETE operations."""
    (objects,) = self.conn.execute(sql.select([
        sql.count('*')
    ]).where(
        sql.objects.c.account == account_name
    ).where(
        sql.objects.c.container == container_name
    ).where(
        sql.objects.c.deleted == False
    )).fetchone()
    if objects > 0:
        self.send_error(httplib.CONFLICT)
        return
    self.http.send_response(httplib.NO_CONTENT)
    self.http.end_headers()
def do_get_container(self, account_name, container_name):
    """Handle container-level GET operations."""
    (objects,) = self.conn.execute(sql.select([
        sql.count('*'),
    ]).where(
        sql.objects.c.account == account_name
    ).where(
        sql.objects.c.container == container_name
    ).where(
        sql.objects.c.deleted == False
    )).fetchone()
    if objects == 0:
        self.send_error(httplib.NOT_FOUND)
        return
    s = sql.select([
        sql.objects.c.name,
        sql.objects.c.bytes,
        sql.objects.c.last_modified,
        sql.objects.c.expires,
        sql.objects.c.meta
    ]).where(
        sql.objects.c.account == account_name
    ).where(
        sql.objects.c.container == container_name
    ).where(
        sql.objects.c.deleted == False
    ).order_by(
        sql.objects.c.name
    )
    s = self.apply_list_query_params(s, sql.objects.c.name)
    out = []
    for (name, bytes, last_modified, expires, meta) in self.conn.execute(s):
        if expires and expires <= time.time():
            continue
        if meta:
            meta = json.loads(meta)
        else:
            meta = {}
        content_type = 'application/octet-stream'
        if 'content_type' in meta:
            content_type = meta['content_type']
        out.append({
            'name': name,
            'hash': meta['hash'],
            'bytes': int(bytes),
            'last_modified': float(last_modified),
            'content_type': content_type
        })
    self.output_file_list(out, 'container', container_name, 'object')
def do_head_container(self, account_name, container_name):
    """Handle container-level HEAD operations."""
    (objects, bytes) = self.conn.execute(sql.select([
        sql.count('*'),
        sql.sum(sql.objects.c.bytes)
    ]).where(
        sql.objects.c.account == account_name
    ).where(
        sql.objects.c.container == container_name
    ).where(
        sql.objects.c.deleted == False
    )).fetchone()
    if objects == 0:
        self.send_error(httplib.NOT_FOUND)
        return
    self.http.send_response(httplib.NO_CONTENT)
    self.http.send_header('X-Container-Bytes-Used', bytes)
    self.http.send_header('X-Container-Object-Count', objects)
    self.http.end_headers()
def do_get_account(self, account_name):
    """Handle account-level GET operations."""
    s = sql.select([
        sql.objects.c.container,
        sql.count('*'),
        sql.sum(sql.objects.c.bytes)
    ]).where(
        sql.objects.c.account == account_name
    ).where(
        sql.objects.c.deleted == False
    ).group_by(
        sql.objects.c.container
    ).order_by(
        sql.objects.c.container
    )
    s = self.apply_list_query_params(s, sql.objects.c.container)
    out = []
    for (container_name, count, bytes) in self.conn.execute(s):
        out.append({
            'name': container_name,
            'count': int(count),
            'bytes': int(bytes)
        })
    self.output_file_list(out, 'account', account_name, 'container')
def do_head_file_container(self):
    """Handle file container-level HEAD operations."""
    if len(self.http.server.config['stores']) == 0:
        self.send_error(httplib.BAD_REQUEST)
        return
    (files, bytes) = self.conn.execute(sql.select([
        sql.count('*'),
        sql.sum(sql.files.c.bytes_disk)
    ]).where(
        sql.files.c.uploader == self.authenticated_account
    )).fetchone()
    if not bytes:
        bytes = 0
    total_config_bytes = 0
    for store in self.http.server.config['stores']:
        total_config_bytes = total_config_bytes + self.http.server.config['stores'][store]['size']
    self.http.send_response(httplib.NO_CONTENT)
    self.http.send_header('X-Container-Bytes-Used', bytes)
    self.http.send_header('X-Container-Object-Count', files)
    self.http.send_header('X-Unladen-Node-Capacity', total_config_bytes)
    self.http.end_headers()
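# The capacity header above is just the sum of the configured store sizes.
# For illustration only, with hypothetical store names and sizes (the dict
# shape is inferred from the ['size'] and ['directory'] lookups used here
# and in check_store_balance below):
#
#   config['stores'] = {
#       'store0': {'size': 100 * 2**30, 'directory': '/srv/unladen/store0'},
#       'store1': {'size': 50 * 2**30, 'directory': '/srv/unladen/store1'},
#   }
#
# would report X-Unladen-Node-Capacity: 161061273600.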
def check_store_balance(self):
    """Check whether stored bytes are spread across the configured stores
    in proportion to each store's configured size, and if any store is more
    than 1% out of balance, move files between stores to rebalance them."""
    store_stats = {}
    total_bytes = 0
    total_objects = 0
    for (store, objects, bytes) in self.conn.execute(
        sql.select([
            sql.files.c.store,
            sql.count("*"),
            sql.sum(sql.files.c.bytes_disk)
        ]).group_by(sql.files.c.store)
    ):
        total_bytes = total_bytes + bytes
        total_objects = total_objects + objects
        store_stats[store] = (objects, bytes)
    if total_objects == 0:
        return
    total_config_bytes = 0
    for store in self.config["stores"]:
        total_config_bytes = total_config_bytes + self.config["stores"][store]["size"]
    rebalance_stores = False
    total_transfer_out = 0
    total_transfer_in = 0
    transfer_d = {}
    for store in sorted(self.config["stores"]):
        if store in store_stats:
            (objects, bytes) = store_stats[store]
        else:
            (objects, bytes) = (0, 0)
        objects_pct = float(objects) / float(total_objects)
        bytes_pct = float(bytes) / float(total_bytes)
        config_pct = float(self.config["stores"][store]["size"]) / float(total_config_bytes)
        config_pct_delta = bytes_pct - config_pct
        print(
            "%s: %d objects (%0.02f%%), %d bytes (%0.02f%%, %0.02f%% from config)"
            % (store, objects, objects_pct * 100.0, bytes, bytes_pct * 100.0, config_pct_delta * 100.0)
        )
        should_have = int(float(total_bytes) * config_pct)
        print(" Should have %d bytes" % should_have)
        transfer = bytes - should_have
        transfer_d[store] = transfer
        if transfer > 0:
            print(" Transfer %d bytes out" % abs(transfer))
            total_transfer_out = total_transfer_out + abs(transfer)
        else:
            print(" Transfer %d bytes in" % abs(transfer))
            total_transfer_in = total_transfer_in + abs(transfer)
        if abs(config_pct_delta) > 0.01:
            rebalance_stores = True
    if rebalance_stores:
        print("Time to rebalance the stores")
    else:
        print("Stores are sufficiently balanced")
        return
    # Build a list of (uuid, from, to) transfer orders: each over-full store
    # spreads its surplus across the under-full stores in proportion to how
    # far each is under its target.
    transfer_orders = []
    for store_from in transfer_d:
        if transfer_d[store_from] < 0:
            continue
        stores_transfer_to = {}
        for store_to in transfer_d:
            if transfer_d[store_to] > 0:
                continue
            x = int(float(abs(transfer_d[store_to])) / float(total_transfer_in) * float(transfer_d[store_from]))
            print("Transfer %d bytes from %s to %s" % (x, store_from, store_to))
            if x > 0:
                stores_transfer_to[store_to] = x
                bytes_left = x
        res = self.conn.execute(
            sql.select([
                sql.files.c.uuid,
                sql.files.c.bytes_disk
            ]).where(
                sql.files.c.store == store_from
            ).order_by(
                sql.desc(sql.files.c.bytes_disk)
            )
        )
        for (fn_uuid, bytes) in res:
            store_to = None
            bytes_left = 0
            for store_to_candidate in stores_transfer_to:
                if float(bytes) / float(stores_transfer_to[store_to_candidate]) < 1.05:
                    store_to = store_to_candidate
                    bytes_left = stores_transfer_to[store_to_candidate]
                    break
            if not store_to:
                continue
            print("Move %s (%d) from %s to %s" % (fn_uuid, bytes, store_from, store_to))
            transfer_orders.append((fn_uuid, store_from, store_to))
            bytes_left = bytes_left - bytes
            if bytes_left <= 0:
                del stores_transfer_to[store_to]
            else:
                stores_transfer_to[store_to] = bytes_left
            if len(stores_transfer_to) == 0:
                res.close()
                break
        print("")
    print("")
    # Execute the transfer orders in random order: copy the file into the
    # destination store, update the database, then remove the original.
    random.shuffle(transfer_orders)
    for (fn_uuid, store_from, store_to) in transfer_orders:
        print("%s %s %s" % (fn_uuid, store_from, store_to))
        store_dir_from = self.config["stores"][store_from]["directory"]
        contentdir_from = os.path.join(store_dir_from, fn_uuid[0:2], fn_uuid[2:4])
        store_dir_to = self.config["stores"][store_to]["directory"]
        contentdir_to = os.path.join(store_dir_to, fn_uuid[0:2], fn_uuid[2:4])
        if not os.path.isdir(contentdir_to):
            os.makedirs(contentdir_to)
        shutil.copy(os.path.join(contentdir_from, fn_uuid), os.path.join(contentdir_to, fn_uuid))
        self.conn.execute(sql.files.update().where(sql.files.c.uuid == fn_uuid).values(store=store_to))
        os.remove(os.path.join(contentdir_from, fn_uuid))
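# Worked example of the proportional split above (illustrative numbers only,
# not taken from any real config): with stores sized 100 GB, 100 GB and 50 GB,
# config_pct is 0.4, 0.4 and 0.2. If 100 GB is stored as 70/20/10, the targets
# (should_have) are 40/40/20, so transfer_d is +30/-20/-10 and
# total_transfer_in is 30. The over-full first store then plans
# abs(-20)/30 * 30 = 20 GB to the second store and abs(-10)/30 * 30 = 10 GB
# to the third, i.e. its surplus is split in proportion to each deficit.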