def put_all(self, key, value):
    """Write `value` to `key` on every worker address that serves it.

    One PUT request is sent per worker address and all responses are
    awaited. Returns True only if every worker acknowledged with
    NO_ERROR; returns False if no worker is known or any worker
    reported an error. An `invalidate` response invalidates the local
    cache entry and retries via `durable_put` (its result is returned).
    """
    worker_addresses = self._get_worker_address(key, False)

    if not worker_addresses:
        # No worker known for this key -- nothing to write to.
        return False

    req, tup = self._prepare_data_request(key)
    req.type = PUT
    tup.payload, tup.lattice_type = self._serialize(value)
    # NOTE(review): timestamp is pinned to 0 here -- presumably the
    # server assigns the real timestamp; confirm against server code.
    tup.timestamp = 0

    # Send the same tuple to every replica, with a fresh request id per
    # address so responses can be matched up individually.
    req_ids = []
    for address in worker_addresses:
        req.request_id = self._get_request_id()
        send_sock = self.pusher_cache.get(address)
        send_request(req, send_sock)
        req_ids.append(req.request_id)

    responses = recv_response(req_ids, self.response_puller, KeyResponse)

    for resp in responses:
        tup = resp.tuples[0]
        if tup.invalidate:
            # reissue the request
            self._invalidate_cache(tup.key)
            return self.durable_put(key, value)
        if tup.error != NO_ERROR:
            return False

    return True
def _query_routing(self, key, port):
    """Ask the routing tier on `port` which worker addresses serve `key`.

    Returns a list of worker IPs for the key, or an empty list if the
    routing tier responded with an error.
    """
    request = KeyAddressRequest()
    request.query_type = u'GET'
    request.response_address = self.ut.get_key_address_connect_addr()
    request.keys.append(key)
    request.request_id = self._get_request_id()

    destination = 'tcp://' + self.elb_addr + ':' + str(port)
    sock = self.pusher_cache.get(destination)
    send_request(request, sock)

    response = recv_response([request.request_id],
                             self.key_address_puller,
                             KeyAddressResponse)[0]

    if response.error != 0:
        return []

    # Flatten the IPs of every address entry that matches our key.
    return [addr
            for entry in response.addresses if entry.key == key
            for addr in entry.ips]
def put(self, key, value):
    """Write `value` to `key` through one worker.

    Routes via a randomly chosen ELB port, sends a single PUT, and
    returns True iff the server reported NO_ERROR (False when no worker
    address could be found).
    """
    port = random.choice(self.elb_ports)
    worker_address = self._query_routing(key, port)

    # Bug fix: _query_routing returns a (possibly empty) list, and the
    # original code indexed [0] BEFORE checking for emptiness, raising
    # IndexError instead of returning False on a routing failure.
    if not worker_address:
        return False
    if type(worker_address) == list:
        worker_address = worker_address[0]

    send_sock = self.pusher_cache.get(worker_address)

    # We pass in a list because the data request preparation can prepare
    # multiple tuples
    req, tup = self._prepare_data_request([key])
    req.type = PUT

    # PUT only supports one key operations, we only ever have to look at
    # the first KeyTuple returned.
    tup = tup[0]
    tup.payload, tup.lattice_type = self._serialize(value)

    send_request(req, send_sock)
    response = recv_response([req.request_id], self.response_puller,
                             KeyResponse)[0]

    tup = response.tuples[0]
    if tup.invalidate:
        self._invalidate_cache(tup.key)

    return tup.error == NO_ERROR
def get_all(self, keys):
    """GET each key from every worker that serves it, merging replicas.

    Returns a dict mapping key -> merged lattice value, or None for
    keys with no valid response.
    """
    if type(keys) != list:
        keys = [keys]
        # NOTE(review): this raise fires unconditionally for non-list
        # input, making the `keys = [keys]` above dead code. It reads as
        # a deliberate "multi-key GET unsupported" guard -- confirm
        # whether the coercion or the raise is the intended behavior.
        raise ValueError('`get_all` currently only supports single key' +
                         ' GETs.')

    worker_addresses = {}
    for key in keys:
        worker_addresses[key] = self._get_worker_address(key, False)

    # Initialize all KV pairs to 0. Only change a value if we get a valid
    # response from the server.
    kv_pairs = {}
    for key in keys:
        kv_pairs[key] = None

    for key in keys:
        if worker_addresses[key]:
            req, _ = self._prepare_data_request(key)
            req.type = GET

            # One request (with a fresh id) per replica address.
            req_ids = []
            for address in worker_addresses[key]:
                req.request_id = self._get_request_id()
                send_sock = self.pusher_cache.get(address)
                send_request(req, send_sock)
                req_ids.append(req.request_id)

            responses = recv_response(req_ids, self.response_puller,
                                      KeyResponse)

            for resp in responses:
                for tup in resp.tuples:
                    if tup.invalidate:
                        self._invalidate_cache(tup.key)
                    if tup.error == NO_ERROR:
                        val = self._deserialize(tup)
                        # Merge replica responses into a single lattice.
                        if kv_pairs[tup.key]:
                            kv_pairs[tup.key].merge(val)
                        else:
                            kv_pairs[tup.key] = val
    return kv_pairs
def get(self, keys):
    """GET one or more keys.

    Returns a dict mapping key -> deserialized lattice, or None for
    keys with no worker address or no valid (non-invalidated,
    error-free) response.
    """
    if type(keys) != list:
        keys = [keys]

    worker_addresses = {}
    for key in keys:
        address = self._get_worker_address(key, 1)
        # Routing may hand back a list of candidate addresses; take the
        # first. Bug fix: guard the [0] index so an empty list becomes
        # None (skipped below) instead of raising IndexError.
        if type(address) == list:
            address = address[0] if address else None
        worker_addresses[key] = address

    # Initialize all KV pairs to None. Only change a value if we get a
    # valid response from the server.
    kv_pairs = {}
    for key in keys:
        kv_pairs[key] = None

    request_ids = []
    for key in worker_addresses:
        if worker_addresses[key]:
            send_sock = self.pusher_cache.get(worker_addresses[key])
            req, _ = self._prepare_data_request([key])
            req.type = GET
            send_request(req, send_sock)
            request_ids.append(req.request_id)

    # Wait for all responses to return.
    responses = recv_response(request_ids, self.response_puller,
                              KeyResponse)

    for response in responses:
        for tup in response.tuples:
            if tup.invalidate:
                self._invalidate_cache(tup.key, 'get')
            # Invalidated tuples are never surfaced to the caller.
            if tup.error == NO_ERROR and not tup.invalidate:
                kv_pairs[tup.key] = self._deserialize(tup)

    return kv_pairs
def put(self, keys, values):
    """Issue one PUT per (key, value) pair and collect per-key results.

    Returns a dict mapping key -> bool (True iff NO_ERROR), or False
    immediately when a key has no known worker address.
    """
    keys = keys if type(keys) == list else [keys]
    values = values if type(values) == list else [values]

    pending = []
    for key, value in zip(keys, values):
        address = self._get_worker_address(key)
        if not address:
            return False

        sock = self.pusher_cache.get(address)

        # We pass in a list because the data request preparation can
        # prepare multiple tuples.
        req, tuples = self._prepare_data_request([key])
        req.type = PUT
        pending.append(req.request_id)

        # PUT only supports one key operations, we only ever have to
        # look at the first KeyTuple returned.
        first = tuples[0]
        first.payload, first.lattice_type = self._serialize(value)

        send_request(req, sock)

    results = {}
    for response in recv_response(pending, self.response_puller,
                                  KeyResponse):
        tup = response.tuples[0]
        if tup.invalidate:
            self._invalidate_cache(tup.key)
        results[tup.key] = (tup.error == NO_ERROR)

    return results
def get_delta(self, keys):
    """GET keys, sending cached versions so the server can reply with a
    delta (or mark the value `identical`) instead of the full payload.

    Returns a dict mapping key -> lattice value (the cached value when
    the server says it is identical), or None for keys with no response.
    """
    if type(keys) != list:
        keys = [keys]

    worker_addresses = {}
    for key in keys:
        worker_addresses[key] = self._get_worker_address(key)

    # Initialize all KV pairs to None. Only change a value if we get a
    # valid response from the server.
    kv_pairs = {}
    for key in keys:
        kv_pairs[key] = None

    request_ids = []
    for key in worker_addresses:
        if key in self.cache:
            # Key is cached: send the previous payload along with the
            # request so the server can answer with a delta.
            kv_pairs[key] = self.cache[key]
            if worker_addresses[key]:
                send_sock = self.pusher_cache.get(worker_addresses[key])
                # Bug fix: `isinstance` is a builtin, not a method --
                # was `self.cache[key].isinstance(LWWPairLattice)`.
                if isinstance(self.cache[key], LWWPairLattice):
                    # For LWW only the timestamp is needed to detect
                    # staleness, so send an empty payload.
                    req, _ = self._prepare_delta_data_request(
                        [key],
                        self._serialize(LWWPairLattice(self.cache[key].ts,
                                                       "")))
                else:
                    req, _ = self._prepare_delta_data_request(
                        [key], self._serialize(self.cache[key]))
                req.type = GET
                send_request(req, send_sock)
                request_ids.append(req.request_id)
        else:
            if worker_addresses[key]:
                send_sock = self.pusher_cache.get(worker_addresses[key])
                req, _ = self._prepare_data_request([key])
                req.type = GET
                send_request(req, send_sock)
                request_ids.append(req.request_id)

    # Wait for all responses to return.
    responses = recv_response(request_ids, self.response_puller,
                              KeyResponse)

    for response in responses:
        for tup in response.tuples:
            if tup.error == NO_ERROR:
                if tup.invalidate:
                    self._invalidate_cache(tup.key)
                if tup.identical:
                    # Bug fix: index by the responding tuple's key, not
                    # the stale loop variable `key` from the send loop.
                    kv_pairs[tup.key] = self.cache[tup.key]
                else:
                    lattice_value = self._deserialize(tup)
                    kv_pairs[tup.key] = lattice_value
                    # Bug fix: `cache[...]` was a NameError; the local
                    # cache lives on self.
                    self.cache[tup.key] = lattice_value

    return kv_pairs
def lambda_handler(event, context):
    """AWS Lambda entry point: benchmark lookups, writes, and reads
    against an Anna cluster, then push latency/throughput stats to the
    benchmark server over ZeroMQ.

    `event` must carry: num_txns, num_reads, num_writes, num_lookups,
    benchmark_ip, elb, zipf, prefix, N. Always returns "Success".
    """
    print('Lambda started')
    num_txns = int(event["num_txns"])
    num_reads = int(event["num_reads"])
    num_writes = int(event["num_writes"])
    num_lookups = int(event["num_lookups"])
    benchmark_server = event["benchmark_ip"]
    elb = event["elb"]
    zipf = float(event["zipf"])
    prefix = event["prefix"]
    N = int(event["N"])

    # Build a bounded Zipfian key-popularity distribution over [1, N).
    x = np.arange(1, N)
    weights = x ** (-zipf)
    weights /= weights.sum()
    bounded_zipf = stats.rv_discrete(name='bounded_zipf',
                                     values=(x, weights))

    read_times = []
    write_times = []
    lookup_times = []
    throughput_time = 0

    ip = requests.get('http://checkip.amazonaws.com').text.rstrip()
    sip = socket.gethostname()
    print('AWS IP Got {}'.format(ip))
    print('Socket IP Got {}'.format(sip))
    dumb_client = AnnaTcpClient(elb, ip)

    for i in range(num_txns):
        print('*** Starting Transaction ' + str(i) + ' ! ***')

        # Perform routing lookups
        for _ in range(num_lookups):
            key = prefix + str(bounded_zipf.rvs(size=1)[0])
            port = 6450
            start = time.time()
            addresses = dumb_client._query_routing(key, port)
            end = time.time()
            lookup_times.append(end - start)
            throughput_time += (end - start)

        # Perform writes
        for _ in range(num_writes):
            key = prefix + str(bounded_zipf.rvs(size=1)[0])
            port = random.choice([6450, 6451, 6452, 6453])
            addresses = dumb_client._query_routing(key, port)
            send_sock = dumb_client.pusher_cache.get(addresses[0])

            data = os.urandom(4096)
            lww = LWW(time.time_ns(), data)
            req, tup = dumb_client._prepare_data_request([key])
            req.type = PUT
            rids = [req.request_id]
            tup = tup[0]
            # Bug fix: serialize the lattice we just built; `value` was
            # an undefined name and raised NameError on the first write.
            tup.payload, tup.lattice_type = dumb_client._serialize(lww)

            start = time.time()
            send_request(req, send_sock)
            responses = recv_response(rids, dumb_client.response_puller,
                                      KeyResponse)
            end = time.time()
            write_times.append(end - start)
            throughput_time += (end - start)

        # Perform reads
        for _ in range(num_reads):
            key = prefix + str(bounded_zipf.rvs(size=1)[0])
            port = random.choice([6450, 6451, 6452, 6453])
            addresses = dumb_client._query_routing(key, port)
            send_sock = dumb_client.pusher_cache.get(addresses[0])

            req, _ = dumb_client._prepare_data_request([key])
            req.type = GET
            rids = [req.request_id]

            start = time.time()
            send_request(req, send_sock)
            # Wait for all responses to return.
            responses = recv_response(rids, dumb_client.response_puller,
                                      KeyResponse)
            end = time.time()
            read_times.append(end - start)
            throughput_time += (end - start)

    # Guard against division by zero when no reads/writes were timed.
    if throughput_time > 0:
        throughput = (num_txns * (num_writes + num_reads)) / throughput_time
    else:
        throughput = 0.0

    # Report latencies in milliseconds.
    read_lat = [t * 1000 for t in read_times]
    write_lat = [t * 1000 for t in write_times]
    lookup_lat = [t * 1000 for t in lookup_times]

    read_msg = ",".join(map(str, read_lat))
    write_msg = ",".join(map(str, write_lat))
    lookup_msg = ",".join(map(str, lookup_lat))

    ctx = zmq.Context(1)
    sckt = ctx.socket(zmq.PUSH)
    sckt.connect('tcp://%s:6600' % benchmark_server)

    message = str(throughput) + ";" + str(read_msg) + ";" + str(
        write_msg) + ";" + str(lookup_msg)
    sckt.send_string(message)

    return "Success"