def broadcast_add_replica():
    """Announce this node to every replica.

    PUTs our socket address to each replica's view endpoint so they add us
    to their replica views. Returns whatever `multicast` returns (futures).
    """
    payload = json.dumps({'socket-address': my_address})

    def build_uri(address):
        # View-management endpoint on the peer.
        return 'http://' + address + route('-view')

    return multicast(
        replicas_view_universe,
        build_uri,
        http_method=HTTPMethods.PUT,
        timeout=3,
        data=payload,
        headers={'Content-Type': 'application/json'},
    )
def send_update_delete(key):
    """Replicate a DELETE of *key* to every node in this node's shard.

    Advances and persists our vector-clock entry, then multicasts the DELETE
    (carrying the clock in the 'VC' header) to all shard members.
    Returns None; does nothing if this node belongs to no shard.
    """
    # Fix: resolve shard membership BEFORE touching the vector clock.
    # Previously the clock was incremented and written to disk even when
    # get_my_id() returned -1 and the send was aborted, advancing causal
    # state for an operation that never happened.
    my_id = get_my_id()
    if my_id == -1:
        return
    vector_clock[my_address_no_port] += 1
    update_vector_clock_file()
    headers = {
        'VC': json.dumps(vector_clock),
        'Content-Type': 'application/json',
    }
    multicast(
        shard_view_universe[my_id],
        lambda address: 'http://' + address + route('/' + key),
        http_method=HTTPMethods.DELETE,
        timeout=3,
        headers=headers,
    )
def shard_add_member(shard_id):
    """Flask handler: place the node named in the request body into shard *shard_id*.

    Rejects unknown nodes, discards duplicates (200 if already in this shard,
    201 if in another), otherwise records the membership locally and forwards
    the identical request to every replica so shard views converge.
    """
    shard_id = int(shard_id)
    body = request.get_json()
    new_member = body['socket-address']

    # Guard: a node we have never seen cannot be assigned to a shard.
    if new_member not in replicas_view_universe:
        return jsonify(
            {'message': "Node does not exist in our view, can't add to shard"}), 400

    # Guard: node already belongs to some shard — discard rather than double-add.
    if any(new_member in shard for shard in shard_view_universe):
        if new_member in shard_view_universe[shard_id]:
            message = f'Discarded: {new_member} is already a member of shard {shard_id}'
            status = 200
        else:
            message = f'Discarded: {new_member} is already a member of another shard'
            status = 201
        return jsonify({'message': message}), status

    shard_view_universe[shard_id].add(new_member)

    # Propagate the unmodified request body/headers to every replica.
    multicast(
        replicas_view_universe,
        lambda address: 'http://' + address + route_shard(f'/add-member/{shard_id}'),
        http_method=HTTPMethods.PUT,
        timeout=3,
        data=request.get_data(),
        headers=request.headers,
    )

    return jsonify({
        'message': f'Successfully added node {new_member} to shard {shard_id}',
    }), 200
def multicast_heartbeat_blocking(addresses):
    """Heartbeat every address in *addresses* and block until all replies arrive.

    Sends our vector clock in the 'VC' header; returns the list of addresses
    whose heartbeat response came back with HTTP 200.
    """
    logger.info(f'Starting HB multicast: {addresses}')
    futures = multicast(
        addresses,
        address_to_heartbeat_uri,
        timeout=TIMEOUT,
        headers={'VC': json.dumps(get_vector_clock())},
    )
    alive = []
    # Collect replies as they finish; a None response means the peer was unreachable.
    for future in concurrent.futures.as_completed(futures):
        unicast = future.result()
        response = unicast.response
        if response is not None and response.status_code == 200:
            alive.append(unicast.address)
    logger.info(f'Alive: {alive}')
    return alive
def reshard():
    """Flask handler: rebuild the shard layout and redistribute the local store.

    Two entry paths, distinguished by the request's source address:
      * client (address not in replicas_view_no_port): computes a new shard
        view from 'shard-count' and broadcasts it to every replica;
      * replica: adopts the shard view sent in the request body.
    Both paths then reset SHARD_COUNT, the vector clock, the delivery buffer
    and the store, and push each partition of the old store to its new shard.
    NOTE(review): statement order matters here — partitions are captured from
    the store BEFORE it is cleared, and the sleep gives peers time to clear
    theirs before union-store traffic arrives.
    """
    global shard_view_universe
    json_data = request.get_json()
    incoming_addr = request.remote_addr
    if incoming_addr not in replicas_view_no_port:  # From client.
        shard_count = json_data['shard-count']
        try:
            shard_view_universe = create_shard_view(replicas_view_universe,
                                                    shard_count)
        except FaultToleranceError:
            # Too few nodes for the requested shard count — abort before
            # mutating any state.
            return jsonify({
                'message':
                'Not enough nodes to provide fault-tolerance with the given shard count!'
            }), 400
        # Broadcast the freshly computed view (sets serialized as lists).
        multicast(replicas_view_universe,
                  lambda a: 'http://' + a + route_shard('/reshard'),
                  http_method=HTTPMethods.PUT,
                  data=json.dumps({
                      'shard_view_universe':
                      [list(s) for s in shard_view_universe]
                  }),
                  headers={'Content-Type': 'application/json'},
                  timeout=3)
    else:  # From replica.
        # Adopt the view computed by the initiating replica.
        shard_view_universe = [
            set(s) for s in json_data['shard_view_universe']
        ]
    global SHARD_COUNT
    SHARD_COUNT = len(shard_view_universe)
    # Resharding restarts causal history: zero every clock entry.
    global vector_clock
    vector_clock = {address: 0 for address in replicas_view_no_port}
    # Copies all data in store to partitions to be sent out to other shards
    # (must run before the store is cleared below).
    partitions = partition_store()
    # Clears its own delivery buffer and store
    global delivery_buffer
    delivery_buffer = []
    global store
    store = {}
    sleep(
        2)  # Give time for servers doing resharding to also clear their store.
    # Ship each partition to the shard that now owns those keys.
    for shard_id, partition in partitions.items():
        multicast(
            shard_view_universe[shard_id],
            lambda a: 'http://' + a + route(
                '/union-store'
            ),  # Create endpoint to extend store with another store in one request.
            http_method=HTTPMethods.PUT,
            data=json.dumps({'partition': partition}),
            headers={'Content-Type': 'application/json'},
            timeout=3)
    return jsonify({'message': 'Resharding done successfully'}), 200