class KVStore(KVStoreBase):
    """DynamoDB-backed key/value store (Python 2 style ``super`` call).

    Items live in the table named by ``settings.THUMBNAIL_DYNAMODB_NAME``;
    the lookup key is stored in the ``key`` attribute, the payload in ``value``.
    """

    def __init__(self):
        super(KVStore, self).__init__()
        region = settings.AWS_REGION_NAME
        access_key = settings.AWS_ACCESS_KEY_ID
        secret = settings.AWS_SECRET_ACCESS_KEY
        conn = boto.dynamodb2.connect_to_region(
            region,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret,
        )
        self.table = Table(settings.THUMBNAIL_DYNAMODB_NAME, connection=conn)

    def _get_raw(self, key):
        """Return the stored value for *key*, or None when the item is absent."""
        try:
            return self.table.get_item(key=key)["value"]
        except boto.dynamodb2.exceptions.ItemNotFound:
            # Explicit None instead of falling off the end of the function.
            return None

    def _set_raw(self, key, value):
        """Create or overwrite the item stored under *key*."""
        try:
            item = self.table.get_item(key=key)
        except boto.dynamodb2.exceptions.ItemNotFound:
            item = self.table.new_item()
        item["key"] = key
        item["value"] = value
        item.save(overwrite=True)

    def _delete_raw(self, *keys):
        """Delete every item named in *keys*."""
        # Fixed: the original built a throwaway list comprehension purely
        # for its side effects; a plain loop states the intent.
        for k in keys:
            self.table.delete_item(key=k)

    def _find_keys_raw(self, prefix):
        """Return all stored keys beginning with *prefix* (table scan)."""
        return [i["key"] for i in self.table.scan(key__beginswith=prefix)]
class KVStore(KVStoreBase):
    """DynamoDB-backed key/value store (Python 3 ``super()`` form).

    Items live in the table named by ``settings.THUMBNAIL_DYNAMODB_NAME``;
    the lookup key is stored in the ``key`` attribute, the payload in ``value``.
    """

    def __init__(self):
        super().__init__()
        region = settings.AWS_REGION_NAME
        access_key = settings.AWS_ACCESS_KEY_ID
        secret = settings.AWS_SECRET_ACCESS_KEY
        conn = boto.dynamodb2.connect_to_region(
            region,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret,
        )
        self.table = Table(settings.THUMBNAIL_DYNAMODB_NAME, connection=conn)

    def _get_raw(self, key):
        """Return the stored value for *key*, or None when the item is absent."""
        try:
            return self.table.get_item(key=key)['value']
        except boto.dynamodb2.exceptions.ItemNotFound:
            # Explicit None instead of falling off the end of the function.
            return None

    def _set_raw(self, key, value):
        """Create or overwrite the item stored under *key*."""
        try:
            item = self.table.get_item(key=key)
        except boto.dynamodb2.exceptions.ItemNotFound:
            item = self.table.new_item()
        item['key'] = key
        item['value'] = value
        item.save(overwrite=True)

    def _delete_raw(self, *keys):
        """Delete every item named in *keys*."""
        # Fixed: the original built a throwaway list comprehension purely
        # for its side effects; a plain loop states the intent.
        for k in keys:
            self.table.delete_item(key=k)

    def _find_keys_raw(self, prefix):
        """Return all stored keys beginning with *prefix* (table scan)."""
        return [i['key'] for i in self.table.scan(key__beginswith=prefix)]
class Post(object):
    """CRUD helpers for the DynamoDB 'Post' table.

    Each item is keyed by ``post_id`` and carries its payload in ``info``.
    """

    def __init__(self):
        self.table = Table('Post')

    def create(self, post_id, post_info):
        """Insert a new post item."""
        self.table.put_item(data=dict(post_id=post_id, info=post_info))

    def get(self, post_id):
        """Return the ``info`` payload of a single post."""
        return self.table.get_item(post_id=post_id)['info']

    def update(self, post_id, post_info):
        """Replace the ``info`` payload of an existing post and save it."""
        item = self.table.get_item(post_id=post_id)
        item['info'] = post_info
        item.save()

    def batch_query(self, post_id_list):
        """Return the ``info`` payloads for every id in *post_id_list*."""
        key_dicts = [dict(post_id=pid) for pid in post_id_list]
        return [row['info'] for row in self.table.batch_get(keys=key_dicts)]
class Comment(object):
    """Data-access helpers for the DynamoDB 'Comment' table.

    Items are keyed by ``comment_id``; the body lives under ``info``.
    """

    def __init__(self):
        self.table = Table('Comment')

    def create(self, comment_id, comment_info):
        """Write a brand-new comment item."""
        self.table.put_item(data=dict(comment_id=comment_id, info=comment_info))

    def get(self, comment_id):
        """Fetch one comment and return its ``info`` payload."""
        return self.table.get_item(comment_id=comment_id)['info']

    def update(self, comment_id, comment_info):
        """Overwrite the ``info`` payload of an existing comment."""
        row = self.table.get_item(comment_id=comment_id)
        row['info'] = comment_info
        row.save()

    def batch_query(self, comment_id_list):
        """Fetch ``info`` payloads for all ids in *comment_id_list*."""
        lookup_keys = [dict(comment_id=cid) for cid in comment_id_list]
        fetched = self.table.batch_get(keys=lookup_keys)
        return [row['info'] for row in fetched]
def get_dict_of_coordinates(conn, sites):
    """Fetch lat/lon coordinates for each site and dump them to a JSON file.

    :param conn: boto DynamoDB connection.
    :param sites: list of site identifiers (hash keys of ``site_coordinates``).
    :returns: dict mapping site -> ``{'lat': float, 'lng': float}``.
    """
    coordinate_table = Table('site_coordinates', connection=conn)
    coordinate_dict = {}
    for site in sites:
        try:
            item = coordinate_table.get_item(site=site)
        except boto.dynamodb2.exceptions.ItemNotFound as e:
            print(e)
            item = None
        except Exception:
            # Fixed: was a bare `except:` (which also swallowed SystemExit /
            # KeyboardInterrupt). Treat as transient: back off once and retry.
            sleep(3)
            item = coordinate_table.get_item(site=site)
        if item is not None:  # fixed: was `item != None`
            # Normalise the raw item: lat/lon come back as non-float types.
            stuff = {}
            for field, raw in item.items():
                if field == 'lat' or field == 'lon':
                    stuff[field] = float(raw)
                else:
                    stuff[field] = raw
            coordinate_dict[stuff['site']] = {
                'lat': stuff['lat'],
                'lng': stuff['lon'],
            }
    filename = '../../data/coordinates.json'
    with open(filename, 'w') as f:
        json.dump(coordinate_dict, f)
    return coordinate_dict
def post(self):
    """Authenticate a login request and write a JSON result.

    Branches on the request's 'isShelter' flag: "0" authenticates a regular
    user by phone number, anything else authenticates a shelter by email.
    NOTE(review): the misspelled 'authantication failed' message is runtime
    output clients may match on, so it is preserved verbatim.
    """
    client_data = self.data
    if self.data['isShelter'] == "0":
        # Coroutine-style credential check (yields to the IO loop).
        userid = yield verify_pwd(client_data['phone'], client_data['password'], self.dynamo)
        # verify user logged in
        if not userid:
            self.write_json({
                'result': 'fail',
                'message': 'authantication failed'
            })
            return
        if userid:
            #get the user info
            user_table = Table('User_Table', connection=self.dynamo)
            userinfo = user_table.get_item(UserID=userid)
            self.write_json({
                'result': 'success',
                'message': 'successfully logged in',
                'userid': userid,
                'firstname': userinfo['FirstName'],
                'lastname': userinfo['LastName']
            })
    else:
        userid = yield verify_pwd_shelter(client_data['email'], client_data['password'], self.dynamo)
        # verify user logged in
        if not userid:
            self.write_json({
                'result': 'fail',
                'message': 'authantication failed'
            })
            return
        if userid:
            #get the user info
            # Shelter lookup; note the response omits first/last name fields.
            shelter_table = Table('Shelter_Table', connection=self.dynamo)
            userinfo = shelter_table.get_item(UserID=userid)
            self.write_json({
                'result': 'success',
                'message': 'successfully logged in',
                'userid': userid,
            })
def writeDB(ngram, idx):
    """Load tab-separated '<words>\\t<count>' lines into the DynamoDB table,
    accumulating counts for words that already exist.

    Python 2 only (print statements).

    :param ngram: newline-separated dump of word/count pairs.
    :param idx: worker/thread index, used only in progress messages.
    """
    tbl = Table('project6-20141589')
    ngram = ngram.split('\n')
    cnt = 0
    for elem in ngram:
        elem = elem.split('\t')
        if len(elem) == 2:
            try:
                elem[1] = int(elem[1])
            except ValueError:
                # Malformed count field: skip the line entirely.
                continue
            atom = {'words': elem[0], 'counts': elem[1]}
            try:
                tbl.put_item(data = atom)
            except ConditionalCheckFailedException:
                # Row already exists: fold the new count into the stored one.
                try:
                    item = tbl.get_item(words = elem[0])
                    item['counts'] += elem[1]
                    item.save()
                except ItemNotFound:
                    # NOTE(review): message text says "Unknown exception" but
                    # the handler actually catches ItemNotFound — confirm intent.
                    print 'Unknown exception raised for (%s, %d)' % (elem[0], elem[1])
            cnt += 1
            if cnt % 10000 == 0:
                print 'Thread %d running (%d)' % (idx, cnt)
    print 'Thread %d finished with %d' % (idx, cnt)
def retrieve_id(db_name, user_id):
    """Look up *user_id* in the DynamoDB table *db_name*.

    :returns: ``(200, body)`` with the user's data on success,
              ``(404, body)`` when the item does not exist.
    """
    table = Table(db_name, connection=boto.dynamodb2.connect_to_region(AWS_REGION))
    try:
        record = table.get_item(id=user_id)
        # The stored activity set is serialised as a JSON list for the body.
        activities_json = json.dumps(list(record['activities']))
        return 200, {
            "data": {
                "type": record['type'],
                "id": str(record['id']),
                "name": record['name'],
                "activities": activities_json
            }
        }
    except boto.dynamodb2.exceptions.ItemNotFound:
        # Item missing: report which id could not be found.
        return 404, {
            "errors": [{
                "not_found": {
                    "id": str(user_id)
                }
            }]
        }
def run(self):
    """Launch the remote client for this spot request and log command results.

    Reads the spot-request row, then its master's batch-job parameter row,
    parses the parameters, runs the bootstrap via ``launch_remote_client``,
    and logs every command's exit status and captured stdout/stderr.
    """
    # Hard-coded connection settings (training environment).
    region_name = 'us-east-1'
    profile_name = 'ipc-training'
    # NOTE(review): dummy_message is never used below.
    dummy_message = None
    dynamodb_conn = boto.dynamodb2.connect_to_region( region_name, profile_name=profile_name )
    spot_request_table = Table( awsspotbatch.common.const.SPOT_REQUEST_TABLE_NAME, connection=dynamodb_conn )
    spot_request_item = spot_request_table.get_item( spot_request_uuid=self.spot_request_uuid )
    #
    spot_batch_job_parm_table = Table( awsspotbatch.common.const.SPOT_BATCH_JOB_PARM_TABLE_NAME, connection=dynamodb_conn )
    #
    # The request row points at its master row via spot_master_uuid.
    spot_batch_job_parm_item = spot_batch_job_parm_table.get_item( spot_master_uuid=spot_request_item[TableSpotRequest.spot_master_uuid] )
    #
    # NOTE(review): batch_job_parm_item is parsed but never used afterwards.
    batch_job_parm_item = BatchJobParmItem( stringParmFile=spot_batch_job_parm_item[TableSpotBatchJobParm.raw_batch_job_parm_item] )
    client_bootstrap_service_cmds_results, client_bootstrap_user_cmds_results = launch_remote_client( spot_request_item )
    logger.info( 'spot_request_uuid: ' + self.spot_request_uuid )
    # Dump per-command diagnostics for the service bootstrap commands...
    for cmd_result in client_bootstrap_service_cmds_results:
        logger.info( ' service cmd: ' + cmd_result['cmd'])
        logger.info( ' remote_exit_status: ' + str(cmd_result['remote_exit_status']) )
        logger.info( ' buf_std_out: ' + cmd_result['buf_std_out'] )
        logger.info( ' buf_std_err: ' + cmd_result['buf_std_err'] )
    # ...and the user bootstrap commands.
    for cmd_result in client_bootstrap_user_cmds_results:
        logger.info( ' user cmd: ' + cmd_result['cmd'])
        logger.info( ' remote_exit_status: ' + str(cmd_result['remote_exit_status']) )
        logger.info( ' buf_std_out: ' + cmd_result['buf_std_out'] )
        logger.info( ' buf_std_err: ' + cmd_result['buf_std_err'] )
class TopicOperation(object):
    """Operations on a topic-type Event item and its per-user event rows.

    The Event item is loaded once at construction time; each operation
    mutates it, ``partial_save()``s it, and mirrors the change through
    ``EventDBOperation``.
    """

    def __init__(self, user_id, event_id):
        self.__user_id = user_id
        self.__event_id = event_id
        self.table = Table('Event')
        self.event_item = self.table.get_item(event_id=event_id)
        self.db_event = EventDBOperation()

    def invite_members_to_topic(self, members):
        """Append *members* to the topic and fan out per-user events.

        :returns: the topic's name, for notifying the invitees.
        """
        info = self.event_item['info']
        topic_name = info['topic_name']
        info['members'] = info['members'] + members
        self.event_item.partial_save()
        self.db_event.bulk_create_new_event_per_user(self.__event_id, EVENT_TYPE, members)
        self.db_event.update_event_to_new(self.__event_id)
        return topic_name

    def exit_topic(self):
        """Remove the current user from the topic, if they are a member."""
        member_list = self.event_item['info']['members']
        if self.__user_id not in member_list:
            return
        member_list.remove(self.__user_id)
        self.event_item['info']['members'] = member_list
        self.event_item.partial_save()
        self.db_event.remove_event_per_user(self.__event_id, self.__user_id)
def get_metric(table_name, connection, metric_name, start_time):
    """Fetch a single metric row keyed by (metric, start_time).

    :returns: the item's attributes as a plain dict.
    """
    metrics = Table(table_name=table_name, connection=connection)
    row = metrics.get_item(metric=metric_name, start_time=start_time)
    return dict(row)
def main():
    """One-off maintenance script: stamp a hard-coded spot-request row with a
    'pending termination detected' timestamp via partial_save().

    Python 2 only (catches StandardError). Exits with status 8 on failure.
    """
    logging.basicConfig( format='%(asctime)s [%(levelname)s] [%(module)s] [%(funcName)s] [%(message)s]', level=logging.INFO )
    logger = logging.getLogger(__name__)
    try:
        logger.info( 'Starting' )
        # Hard-coded target table/row/timestamp (debug/training values).
        spot_request_table_name = 'spotbatch.spotrequest'
        spot_request_uuid = '90719024-e546-11e4-9020-101f74edff46'
        dynamodb_conn = boto.dynamodb2.connect_to_region( 'us-east-1', profile_name='ipc-training')
        ts_pending_termination_detected = '2015-01-05T18:02:00Z'
        spot_request_table = Table( spot_request_table_name, connection=dynamodb_conn )
        spot_request_item = spot_request_table.get_item( spot_request_uuid=spot_request_uuid )
        spot_request_item[TableSpotRequest.ts_pending_termination_detected] = ts_pending_termination_detected
        # partial_save writes only the attribute changed above.
        partial_save_result = spot_request_item.partial_save()
        logger.info(partial_save_result)
        logger.info( 'Completed Successfully' )
    except StandardError as e:
        logger.error( e )
        logger.error( traceback.format_exc() )
        sys.exit(8)
def execute(self, observation):
    """Store a weather observation if it is not already in DynamoDB.

    Derives a composite key '<station_id>_<epoch seconds>' from the RFC 822
    observation time, then inserts only when no row with that key exists.
    Python 2 only (rfc822 module, print statements).

    :returns: the put_item result when a new row is written, False otherwise.
    """
    station_id = observation['station_id']
    raw_time = observation['observation_time_rfc822']
    # Parse the RFC 822 timestamp and convert it to whole epoch seconds.
    parsed_time = datetime.datetime.fromtimestamp(
        rfc822.mktime_tz(rfc822.parsedate_tz(raw_time)))
    epoch = datetime.datetime.utcfromtimestamp(0)
    delta = int((parsed_time - epoch).total_seconds())
    observation['ObservationTime'] = delta
    observation['StationId'] = station_id
    composite_key = "%s_%d" % (station_id, delta)
    observation['CompositeKey'] = composite_key
    # AWS credentials come from the environment.
    region = os.environ['AWS_DEFAULT_REGION']
    accessKey = os.environ['AWS_ACCESS_KEY']
    secretKey = os.environ['AWS_SECRET_KEY']
    try:
        connx = boto.dynamodb2.connect_to_region(
            region,
            aws_access_key_id=accessKey,
            aws_secret_access_key=secretKey)
        obs_table = Table('VocalPelicanObservation', connection=connx)
        # Existence probe: succeeding means the observation is a duplicate.
        test_row = obs_table.get_item(CompositeKey=composite_key)
    except JSONResponseError as responseError:
        # authentication problem
        print responseError
    except boto.dynamodb2.exceptions.ItemNotFound as responseError:
        # not found implies safe to add
        return obs_table.put_item(observation)
    # Duplicate row (or auth failure above): nothing written.
    return False
def add(db_name, idnum, activities):
    """Add comma-separated *activities* to user *idnum* in table *db_name*.

    :param db_name: DynamoDB table name.
    :param idnum: user id (hash key).
    :param activities: comma-separated activity names; duplicates collapse.
    :returns: ``(200, body)`` on success, ``(404, body)`` when the user
              cannot be updated.
    """
    #get the table
    users = Table(db_name, connection=boto.dynamodb2.connect_to_region(AWS_REGION))
    #split the list by comma
    activity_set = set(activities.split(","))
    #convert set to list so we can dump into a json representation
    activity_list = list(activity_set)
    #get json respresentations of the added activities
    activity_json = json.dumps(activity_list)
    #try to find matching id and add activities
    try:
        user = users.get_item(id=idnum)
        #adds activity list to activities field
        for item in activity_set:
            user['activities'].add(item)
        #because of concurrent activities, we want to partial save
        user.partial_save()
        response_body = 200, {
            "data": {
                "type": "person",
                "id": str(idnum),
                "added": activity_json
            }
        }
    #failed to find, output 404 message
    except Exception as e:  # fixed: Python 2 `except Exception, e` syntax
        #print error in terminal
        print(e)
        response_body = 404, {"errors": [{"not_found": {"id": str(idnum)}}]}
    # Fixed: the original computed response_body but never returned it,
    # so callers always received None.
    return response_body
def test_database():
    """Smoke-test the 'sorted_resolved_inverted_index' DynamoDB table.

    The word 'experience' is expected to resolve to three known URLs in a
    fixed order. Python 2 only (uses the removed builtin ``cmp``).

    :returns: True when all three URLs match, False otherwise.
    """
    import boto.dynamodb2
    from boto.dynamodb2.fields import HashKey
    from boto.dynamodb2.table import Table
    from boto.dynamodb2.types import NUMBER, STRING
    import json
    sorted_resolved_inverted_index_db = Table('sorted_resolved_inverted_index',
        schema=[HashKey('word', data_type=STRING),],
        # If you need to specify custom parameters like keys or region info
        connection= boto.dynamodb2.connect_to_region(region_name="us-east-1",
            aws_access_key_id="SECRET",
            aws_secret_access_key="SECRET")
    )
    # Known documents used as the expected resolution order.
    DOC_A = "http://www.eecg.toronto.edu/Welcome.html"
    DOC_B = "http://www.ece.utoronto.ca"
    DOC_C = "http://www.eecg.toronto.edu"
    WORD_A = "experience"
    # Expected and actual result comparison
    expected_result = [DOC_A,DOC_B,DOC_C]
    actual_result = sorted_resolved_inverted_index_db.get_item(word=WORD_A)
    # The stored 'url_list' attribute is a JSON-encoded list of URLs.
    actual_result = json.loads(actual_result['url_list']);
    # If the two results equal return true
    bool_A = (cmp(expected_result[0], actual_result[0]) == 0)
    bool_B = (cmp(expected_result[1], actual_result[1]) == 0)
    bool_C = (cmp(expected_result[2], actual_result[2]) == 0)
    if bool_A and bool_B and bool_C:
        return True
    else:
        return False
def isValidUser(self, username, passwd):
    """Validate credentials against DynamoDB on Linux, or a hard-coded
    local dict on other platforms.

    :param username: login name (email on the DynamoDB path).
    :param passwd: password to check.
    :returns: a truthy value (DynamoDB item or placeholder string) on
              success, None on failure.
    """
    print('this is ' + _platform + ' system')
    users = {'*****@*****.**': 'admin'}
    if _platform == "linux" or _platform == "linux2":
        try:
            print('Before accessing DynamoDB')
            users = Table('Users')
            validuser = users.get_item(EmailId=username, Password=passwd)
            print('Linux - dynamodb authorization successful')
        except Exception:
            # Fixed: was a bare `except:` which also swallowed SystemExit
            # and KeyboardInterrupt; keep the best-effort fallback behavior.
            validuser = None
            print('Accessing DynamoDB failed')
    else:
        try:
            # Local fallback: only succeeds on win32 with the baked-in dict.
            if (username in users and users[username] == passwd and _platform == "win32"):
                validuser = '******'
                print('Windows - authorization successful')
            else:
                validuser = None
                print('Windows authorization failed!')
        except Exception:
            # Fixed: was a bare `except:`.
            validuser = None
            print('local authentication failed')
    return validuser
def writeDB(ngram, idx):
    """Load tab-separated '<words>\\t<count>' lines into the DynamoDB table,
    adding counts onto rows that already exist.

    Python 2 only (print statements).

    :param ngram: newline-separated dump of word/count pairs.
    :param idx: worker/thread index, used only in progress messages.
    """
    tbl = Table('project6-20141589')
    ngram = ngram.split('\n')
    cnt = 0
    for elem in ngram:
        elem = elem.split('\t')
        if len(elem) == 2:
            try:
                elem[1] = int(elem[1])
            except ValueError:
                # Malformed count field: skip this line.
                continue
            atom = {'words': elem[0], 'counts': elem[1]}
            try:
                tbl.put_item(data=atom)
            except ConditionalCheckFailedException:
                # Row already exists: accumulate the count instead.
                try:
                    item = tbl.get_item(words=elem[0])
                    item['counts'] += elem[1]
                    item.save()
                except ItemNotFound:
                    # NOTE(review): the text says "Unknown exception" although
                    # the handler catches ItemNotFound specifically — confirm.
                    print 'Unknown exception raised for (%s, %d)' % (elem[0], elem[1])
            cnt += 1
            if cnt % 10000 == 0:
                print 'Thread %d running (%d)' % (idx, cnt)
    print 'Thread %d finished with %d' % (idx, cnt)
def spot_master_row_partial_save( spot_master_table_name, spot_master_item, dict_keys_values, region_name='us-east-1', profile_name=None ):
    """Save Master item attribute name/values as specified in dict_keys_values

    Applies the attributes and partial_save()s; if that fails (conditional
    save lost a race), re-fetches a fresh item and retries up to 10 times,
    sleeping 6s between attempts. Python 2 only (dict.iteritems).

    :param spot_master_table_name:
    :param spot_master_item:
    :param dict_keys_values:
    :param region_name: (Default value = 'us-east-1')
    :param profile_name: (Default value = None)
    :raises DynamoDbPartialSaveError: after exhausting all retry attempts.
    """
    for key,value in dict_keys_values.iteritems():
        spot_master_item[ key ] = value
    partial_save_result = spot_master_item.partial_save()
    if partial_save_result:
        return      # success on first partial save attempt
    # First partial save failed, try a few more times
    spot_master_uuid = spot_master_item[ TableSpotMaster.spot_master_uuid ]
    max_attempts = 10
    num_attempts = 0
    while True:
        # Fresh connection + fresh item each retry so the conditional save
        # is evaluated against the latest stored version of the row.
        dynamodb_conn = boto.dynamodb2.connect_to_region( region_name, profile_name=profile_name )
        spot_master_table = Table( spot_master_table_name, connection=dynamodb_conn )
        spot_master_item = spot_master_table.get_item( spot_master_uuid=spot_master_uuid )
        for key,value in dict_keys_values.iteritems():
            spot_master_item[ key ] = value
        partial_save_result = spot_master_item.partial_save()
        # partial_save_result = master_item.partial_save()
        if partial_save_result:
            break
        num_attempts += 1
        if num_attempts == max_attempts:
            raise awsspotbatch.common.exception.DynamoDbPartialSaveError('Exceeded partial save attempts on master table, spot_master_uuid=' + spot_master_uuid, spot_master_table_name )
        time.sleep(6)
def send_pos(conn, df, site):
    '''Send position observations and their uncertainties to the position table.
    Function checks if item exists. If it exists, it check to see if it needs
    an update. If it does then the item is updated. Otherwise it is left alone.

    :param conn: boto DynamoDB connection.
    :param df: table-like object with 'Date', 'Up' and 'Sig' columns.
    :param site: site identifier (hash key of 'vertical_positions').
    '''
    # Build one item: the site key plus one attribute per date, each holding
    # a single-element list of {'pos', 'uncert'} strings.
    pos_data = { 'site' : site }
    for i in range(0, len(df['Date'])):
        pos_data.update({str(df['Date'][i]):[{
            'pos' : str(df['Up'][i]),
            'uncert' : str(df['Sig'][i])
        }] })
    pos_table = Table('vertical_positions', connection = conn)
    try:
        # Fast path: brand-new site, plain insert succeeds.
        pos_table.put_item(data = pos_data)
    except:
        # NOTE(review): bare excepts below are load-bearing — the put failure
        # is the "item exists" signal, and the inner one treats a missing
        # attribute as "needs update". Narrowing them would change behavior.
        try:
            logging.info('Site already in DB, updating values.')
            item = pos_table.get_item(site=site)
            keys = pos_data.keys()
            update = False
            for key in keys:
                try:
                    if item[key] != pos_data[key]:
                        item[key] = pos_data[key]
                        update = True
                except:
                    # Attribute absent on the stored item: add it.
                    item[key] = pos_data[key]
                    update = True
            if update == True:
                item.partial_save()
                logging.info('Positions for site {0} updated'.format(site))
            else:
                logging.info('No need to update Positions for site {0}'.format(site))
        except:
            logging.error('Problem loading positions for site {0}'.format(site))
def retrieve_id(db_name, user_id):
    """Fetch user *user_id* from the DynamoDB table *db_name*.

    :returns: ``(200, payload)`` on success, ``(404, errors)`` when absent.
    """
    connection = boto.dynamodb2.connect_to_region(AWS_REGION)
    users = Table(db_name, connection=connection)
    try:
        user = users.get_item(id=user_id)
        # Serialise the stored activity set as a JSON list for the response.
        payload = {
            "data": {
                "type": user['type'],
                "id": str(user['id']),
                "name": user['name'],
                "activities": json.dumps(list(user['activities']))
            }
        }
        return 200, payload
    except boto.dynamodb2.exceptions.ItemNotFound:
        return 404, {"errors": [{"not_found": {"id": str(user_id)}}]}
def process( self, message ) :
    """ Increment the Success Count. When this matches
    :param message: SQS Message instance

    Retries the read-increment-save cycle up to 10 times (partial_save can
    fail under concurrent updates), 6s apart, then deletes the SQS message
    once the counter is persisted. Python 2 only (StandardError).
    """
    try:
        spot_master_msg = SpotMasterMsg( raw_json=message.get_body() )
        spot_master_uuid = spot_master_msg.spot_master_uuid
        dynamodb_conn = boto.dynamodb2.connect_to_region( self.region_name, profile_name=self.profile_name )
        max_attempts = 10
        num_attempts = 0
        while True:
            # Re-read the row every attempt so the increment applies to the
            # latest stored value.
            spot_master_table = Table( self.spot_master_table_name, connection=dynamodb_conn )
            spot_master_item = spot_master_table.get_item( spot_master_uuid=spot_master_uuid )
            spot_master_item[ TableSpotMaster.num_requests_complete_ok ] = spot_master_item[ TableSpotMaster.num_requests_complete_ok ] + 1
            try:
                partial_save_result = spot_master_item.partial_save()
                # partial_save_result = master_item.partial_save()
                if partial_save_result:
                    break
            except StandardError:
                # Swallow and fall through to the retry bookkeeping below.
                pass
            num_attempts += 1
            if num_attempts == max_attempts:
                raise awsspotbatch.common.exception.DynamoDbPartialSaveError('Exceeded partial save attempts on master table for num_requests_complete_ok, spot_master_uuid=' + spot_master_uuid, self.spot_master_table_name )
            time.sleep(6)
        # Counter saved: the SQS message is done.
        self.spot_master_sqs_message_durable.delete_message(message)
    except StandardError as e:
        logger.error( fmt_master_item_msg_hdr( spot_master_item ) + str(e) )
        logger.error( fmt_master_item_msg_hdr( spot_master_item ) + traceback.format_exc() )
def get_minion(instanceid):
    """Fetch the 'minions' table item for *instanceid*.

    :returns: the DynamoDB item on success (False only if the lookup could
              not run, which in practice re-raises instead).
    :raises Exception: whatever the DynamoDB lookup raised.
    """
    result = False
    try:
        minions = Table('minions')
        result = minions.get_item(instanceid=instanceid)
    except Exception:
        # Fixed: Python 2 `raise e` discarded the original traceback;
        # a bare raise re-raises with it intact.
        raise
    # Fixed: the original never returned the fetched item.
    return result
def getReportID(self, val_hashkey, tablename=""):
    """
    Purpose: Used to query any Dynamo DB table having HASH_KEY as REPORT_NAME
             by providing the parameters. Method will return an resultset
             object of all the records matching for criteria.
    :param self: class object itself
    :param val_hashkey: Hash Key value for the Dynamo DB table.
    :param tablename: Dynamo DB table name (defaults to TBL_AWS_REPORT_HDR).
    :returns: the matching item; exits the process with status 1 on error.
    """
    if not tablename:
        tablename = 'TBL_AWS_REPORT_HDR'
    ## Create table object for the dynamo DB table
    tab = Table(tablename, connection=self.conn)
    try:
        #get the record from table based on reportname (stored upper-cased)
        item = tab.get_item(Report_Name=val_hashkey.upper())
    except Exception as e:  # fixed: Python 2 `except Exception, e` syntax
        self.m_logger.info("Error: Error While running the getReportID method..")
        self.m_logger.info("Exception: "+ str(e) +"occured while getReportID method")
        self.m_logger.sendlog()
        sys.exit(1)
    # Fixed: the docstring promises a result, but the original returned None.
    return item
class dynamodb():
    """Thin wrapper around one DynamoDB table; region, credentials and table
    name come from module-level constants. Python 2 only (print statement)."""

    def __init__(self):
        self.conn = dynamodb2.connect_to_region(
            REGION,
            aws_access_key_id=ACCESS_KEY,
            aws_secret_access_key=SECRET_KEY,
        )
        self.table = Table(TABLE_NAME, connection=self.conn)

    def read_from_db(self):
        # Full table scan; each item is flattened into a plain dict.
        results = self.table.scan()
        # logger.info('results: {}'.format(results))
        data = []
        for dynamo_item in results:
            data.append(dict(dynamo_item.items()))
        return data

    def delete_from_db(self, card_id, timestamp):
        # Items are keyed by (card_data, timestamp); failures are printed,
        # not raised.
        try:
            a = self.table.get_item(card_data=card_id, timestamp=timestamp)
            a.delete()
        except Exception as e:
            print 'error: {}'.format(e)
def execute(self, observation):
    """Insert a weather observation into DynamoDB unless it already exists.

    Builds a composite key '<station_id>_<epoch seconds>' from the RFC 822
    observation timestamp and only writes when no row with that key is found.
    Python 2 only (rfc822 module, print statements).

    :returns: the put_item result when a new row is written, False otherwise.
    """
    station_id = observation['station_id']
    raw_time = observation['observation_time_rfc822']
    # Parse the RFC 822 timestamp and reduce it to whole epoch seconds.
    parsed_time = datetime.datetime.fromtimestamp(rfc822.mktime_tz(rfc822.parsedate_tz(raw_time)))
    epoch = datetime.datetime.utcfromtimestamp(0)
    delta = int((parsed_time - epoch).total_seconds())
    observation['ObservationTime'] = delta
    observation['StationId'] = station_id
    composite_key = "%s_%d" % (station_id, delta)
    observation['CompositeKey'] = composite_key
    # AWS credentials come from the environment.
    region = os.environ['AWS_DEFAULT_REGION']
    accessKey = os.environ['AWS_ACCESS_KEY']
    secretKey = os.environ['AWS_SECRET_KEY']
    try:
        connx = boto.dynamodb2.connect_to_region(region, aws_access_key_id=accessKey, aws_secret_access_key=secretKey)
        obs_table = Table('VocalPelicanObservation', connection = connx)
        # Existence probe: a successful read means this row is a duplicate.
        test_row = obs_table.get_item(CompositeKey=composite_key)
    except JSONResponseError as responseError:
        # authentication problem
        print responseError
    except boto.dynamodb2.exceptions.ItemNotFound as responseError:
        # not found implies safe to add
        return obs_table.put_item(observation)
    # Duplicate row (or auth failure above): nothing written.
    return False
def display_keypair():
    """Debug helper: fetch one RSA-key row (hard-coded master uuid), decode
    it with the module-level ``kp_enc_key``, and print the key material.
    Python 2 only (print statement).
    """
    dynamodb_conn = boto.dynamodb2.connect_to_region( 'us-east-1', profile_name='ipc-training' )
    spot_rsa_key_table = Table( awsspotbatch.common.const.SPOT_RSA_KEY_TABLE_NAME, connection=dynamodb_conn )
    rsa_key_item = spot_rsa_key_table.get_item( spot_master_uuid='4d70b3da-f5af-11e4-b866-101f74edff46' )
    # decode() reverses the encoding applied when the key was stored.
    kp_material_dec = decode( kp_enc_key, str( rsa_key_item[ TableSpotRSAKey.rsa_key_encoded ]) )
    print kp_material_dec
def get_batch_job_parm_item( spot_master_uuid, spot_batch_job_parm_table_name, region_name='us-east-1', profile_name=None, attributes=None ):
    """Fetch the batch-job-parameters row for a master uuid, retrying up to
    10 times (6s apart, reconnecting between attempts) before giving up.
    Python 2 only (StandardError).

    :param spot_master_uuid:
    :param spot_batch_job_parm_table_name:
    :param region_name: (Default value = 'us-east-1')
    :param profile_name: (Default value = None)
    :param attributes: (Default value = None)
    :raises DynamoDbGetItemMaxAttemptsExceeded: after the final failed attempt.
    """
    get_attempt_cnt = 0
    get_attempt_max = 10
    dynamodb_conn = boto.dynamodb2.connect_to_region( region_name, profile_name=profile_name )
    while True:
        batch_job_parm_item_table = Table( spot_batch_job_parm_table_name, connection=dynamodb_conn )
        try:
            batch_job_parm_item = batch_job_parm_item_table.get_item( spot_master_uuid=spot_master_uuid, attributes=attributes )
            return batch_job_parm_item
        except StandardError as e:
            get_attempt_cnt += 1
            if get_attempt_cnt == get_attempt_max:
                raise awsspotbatch.common.exception.DynamoDbGetItemMaxAttemptsExceeded('Failed attempt to get item from: ' + spot_batch_job_parm_table_name + ' for spot_master_uuid: ' + spot_master_uuid + ' due to exception: ' + e.get_message(), spot_batch_job_parm_table_name )
            # Reconnect before retrying in case the connection itself is bad.
            dynamodb_conn = boto.dynamodb2.connect_to_region( region_name, profile_name=profile_name )
            time.sleep(6)
def get_minion(instanceid):
    """Fetch the "minions" table item for *instanceid*.

    :returns: the DynamoDB item on success (False only if the lookup could
              not run, which in practice re-raises instead).
    :raises Exception: whatever the DynamoDB lookup raised.
    """
    result = False
    try:
        minions = Table("minions")
        result = minions.get_item(instanceid=instanceid)
    except Exception:
        # Fixed: Python 2 `raise e` discarded the original traceback;
        # a bare raise re-raises with it intact.
        raise
    # Fixed: the original never returned the fetched item.
    return result
def getSecret(name, version="", region="us-east-1", table="credential-store"):
    '''
    fetch and decrypt the secret called `name`

    With no version, a consistent query returns the highest-versioned item;
    otherwise the exact (name, version) item is fetched. The wrapped data
    key is unwrapped via KMS (first 32 bytes AES key, remainder HMAC key),
    the ciphertext HMAC is verified, then AES-CTR decryption is applied.
    '''
    secretStore = Table(table, connection=boto.dynamodb2.connect_to_region(region))
    if version == "":
        # do a consistent fetch of the credential with the highest version
        result_set = [x for x in secretStore.query_2(limit=1, reverse=True, consistent=True, name__eq=name)]
        if not result_set:
            raise ItemNotFound("Item {'name': '%s'} couldn't be found." % name)
        material = result_set[0]
    else:
        material = secretStore.get_item(name=name, version=version)
    kms = boto.kms.connect_to_region(region)
    # Check the HMAC before we decrypt to verify ciphertext integrity
    try:
        kms_response = kms.decrypt(b64decode(material['key']))
    except:
        # NOTE(review): bare except hides the real KMS failure reason.
        raise KmsError("Could not decrypt hmac key with KMS")
    # Split the unwrapped material: AES key then HMAC key.
    key = kms_response['Plaintext'][:32]
    hmac_key = kms_response['Plaintext'][32:]
    hmac = HMAC(hmac_key, msg=b64decode(material['contents']), digestmod=SHA256)
    # NOTE(review): non-constant-time digest comparison; consider
    # hmac.compare_digest — confirm against the threat model before changing.
    if hmac.hexdigest() != material['hmac']:
        raise IntegrityError("Computed HMAC on %s does not match stored HMAC" % name)
    dec_ctr = Counter.new(128)
    decryptor = AES.new(key, AES.MODE_CTR, counter=dec_ctr)
    plaintext = decryptor.decrypt(b64decode(material['contents']))
    return plaintext
class ReminderOperation(object):
    """Operations on a reminder-type Event item and its per-user event rows.

    The Event item is loaded once at construction; each operation mutates it,
    partial_save()s it, and mirrors the change through EventDBOperation.
    Values observed in this class: per-receiver status 1 = completed,
    2 = delayed, 3 = rejected; event status 5 = revoked, 6 = resent —
    confirm the full code set against the Event schema's owner.
    """

    def __init__(self, user_id, event_id):
        self.__user_id = user_id
        self.__event_id = event_id
        self.table = Table('Event')
        self.event_item = self.table.get_item(event_id=event_id)
        self.db_event = EventDBOperation()

    def change_receivers(self, receivers):
        # Replace the receiver list and reset each receiver's status to 0;
        # per-user rows for the old receivers are removed and recreated.
        current_receivers = self.event_item['info']['receivers']
        self.event_item['info']['receivers'] = receivers
        self.event_item['info']['receiver_status'] = dict.fromkeys(receivers, 0)
        self.event_item.partial_save()
        self.db_event.remove_event_by_user_id_list(self.__event_id, current_receivers)
        self.db_event.bulk_create_new_event_per_user(self.__event_id, EVENT_TYPE, receivers)
        self.db_event.update_event_to_new(self.__event_id)

    def complete_reminder_by_receiver(self, message=None):
        # Mark this receiver done (status 1) and record an action comment.
        # Returns the creator's id so the caller can notify them.
        creator_id = self.event_item['info']['creator_id']
        self.event_item['info']['receiver_status'][self.__user_id] = 1
        self.event_item.partial_save()
        self.db_event.save_comment_by_event_id(self.__event_id, self.__user_id, action=1, content=message)
        self.db_event.update_event_to_new(self.__event_id)
        return creator_id

    def revoke_reminder_by_creator(self, message=None):
        # Set event status 5 and drop per-user rows for receivers + creator.
        creator_id = self.event_item['info']['creator_id']
        receivers = self.event_item['info']['receivers']
        self.event_item['info']['status'] = 5
        self.event_item.partial_save()
        self.db_event.save_comment_by_event_id(self.__event_id, self.__user_id, action=5, content=message)
        user_id_list = receivers + [self.__user_id]
        self.db_event.remove_event_per_user(self.__event_id, user_id_list)
        return creator_id

    def delay_reminder_by_receiver(self, message=None):
        # Mark this receiver delayed (status 2) and record an action comment.
        creator_id = self.event_item['info']['creator_id']
        self.event_item['info']['receiver_status'][self.__user_id] = 2
        self.event_item.partial_save()
        self.db_event.save_comment_by_event_id(self.__event_id, self.__user_id, action=2, content=message)
        self.db_event.update_event_to_new(self.__event_id)
        return creator_id

    def reject_reminder_by_receiver(self, message=None):
        # Mark this receiver rejected (status 3) and remove their per-user row.
        creator_id = self.event_item['info']['creator_id']
        self.event_item['info']['receiver_status'][self.__user_id] = 3
        self.event_item.partial_save()
        self.db_event.save_comment_by_event_id(self.__event_id, self.__user_id, action=3, content=message)
        self.db_event.update_event_to_new(self.__event_id)
        self.db_event.remove_event_per_user(self.__event_id, self.__user_id)
        return creator_id

    def resend_reminder_by_creator(self, message=None):
        # Set event status 6 and mark the event new again for all receivers.
        creator_id = self.event_item['info']['creator_id']
        self.event_item['info']['status'] = 6
        self.event_item.partial_save()
        self.db_event.save_comment_by_event_id(self.__event_id, self.__user_id, action=6, content=message)
        self.db_event.update_event_to_new(self.__event_id)
        return creator_id
def get_vote(object_uuid, user):
    """Return the vote item stored for (object_uuid, user).

    :returns: the DynamoDB item, or None when it is missing or the lookup
              fails validation/transport.
    """
    connection = connect_to_dynamo()
    votes = Table(table_name=get_table_name('votes'), connection=connection)
    try:
        return votes.get_item(parent_object=object_uuid, user=user)
    except (ItemNotFound, JSONResponseError, ValidationException):
        return None
class dynamoDB:
    """DynamoDB table wrapper that creates the table on first use (falling
    back to opening an existing one) and exposes simple CRUD helpers.

    Exceptions are used for control flow throughout: add() tries
    update-then-insert, delete()/get() return None when the item is missing.
    """

    def __init__(self, db_name, partition_key_name):
        self.table_dynamo = None
        # Name of the hash key; used to build keyword lookups below.
        self.partition_key_name = partition_key_name
        try:
            self.table_dynamo = Table.create(db_name, schema=[HashKey(partition_key_name)], connection=client_dynamo)
            # Table creation is asynchronous; crude fixed wait for ACTIVE.
            print ("Wait 20 sec until the table is created")
            time.sleep(20)
            print ("New table created.")
        except Exception as e:
            # Create failed — assumed to mean the table already exists.
            self.table_dynamo = Table(db_name, connection=client_dynamo)
            print ("Table already exists.")

    def add(self, **kwargs):
        # Upsert: update the existing record if found, else insert a new one.
        # get() returns None when absent, so record[k] raises and we fall
        # through to put_item.
        try:
            record = self.get(kwargs[self.partition_key_name])
            for k,v in kwargs.items():
                record[k] = v
            record.save(overwrite=True)
            #print("Record has been updated.\n")
        except Exception as e:
            self.table_dynamo.put_item(data=kwargs)
            #print("New entry created.\n")

    def delete(self, pk):
        # Returns the deleted record, or None when it does not exist.
        try:
            record = self.table_dynamo.get_item(**{self.partition_key_name:pk})
            self.table_dynamo.delete_item(**{self.partition_key_name:pk})
            #print("The record has been deleted.")
            return record
        except Exception as e:
            #print("Cannot delete the record, it does not exist.")
            pass
        return None

    def get(self,pk):
        # Returns the item, or None when it does not exist.
        try:
            item = self.table_dynamo.get_item(**{self.partition_key_name:pk})
            return item
        except Exception as e:
            #print("Cannot get the record, it does not exist.")
            pass
        return None

    def scan(self,**filter_kwargs):
        # Pass-through to boto's Table.scan with arbitrary filter kwargs.
        return self.table_dynamo.scan(**filter_kwargs)
def delete(id):
    """Delete the contact with the given id; respond with a JSON status
    message either way (errors are reported in the body, not raised)."""
    try:
        contacts = Table('Contacts')
        res = contacts.get_item(id=id).delete()
        return Response(json.dumps({'message' : 'deleted'}), mimetype='application/json')
    except Exception as e:
        return Response(json.dumps({'message' : str(e)}), mimetype='application/json')
def get_by_id(id):
    """Fetch one contact by id and return it as a JSON response; on any
    failure the error text is returned in a JSON 'message' body instead."""
    try:
        contact = Table('Contacts').get_item(id=id)
        payload = {
            'id': contact['id'],
            'name': contact['name'],
            'email': contact['email'],
            'age': str(contact['age'])
        }
        return Response(json.dumps(payload), mimetype='application/json')
    except Exception as e:
        return Response(json.dumps({'message' : str(e)}), mimetype='application/json')
def process( self, message ) :
    """ Try to submit another Spot Request based on the one that just failed
    :param message: SQS Message instance

    On success a SpotRequestMsg is queued and the triggering SQS message is
    deleted; when no spot capacity is available the original message is
    re-queued with a delay so it is reprocessed later.
    Python 2 only (StandardError).
    """
    try:
        spot_master_msg = SpotMasterMsg( raw_json=message.get_body() )
        spot_master_uuid = spot_master_msg.spot_master_uuid
        logger.info( fmt_master_uuid_msg_hdr( spot_master_uuid ) + 'process_resubmit_failed_request')
        dynamodb_conn = boto.dynamodb2.connect_to_region( self.region_name, profile_name=self.profile_name )
        spot_master_table = Table( self.spot_master_table_name, connection=dynamodb_conn )
        spot_master_item = spot_master_table.get_item( spot_master_uuid=spot_master_uuid )
        spot_request_table = Table( self.spot_request_table_name, connection=dynamodb_conn )
        failed_spot_request_item = spot_request_table.get_item( spot_request_uuid=spot_master_msg.spot_request_uuid )
        # Request spot instance
        spot_instance_request = self.resubmit_failed_request_spot_instance( spot_master_item, failed_spot_request_item, dynamodb_conn )
        # Queue up a SpotRequestMsg
        if spot_instance_request != None:
            # Fresh uuid for the new attempt; attempt_number is carried over
            # from the failed request and incremented.
            spot_request_uuid = str(uuid.uuid1())
            spot_request_msg = SpotRequestMsg( spot_request_uuid=spot_request_uuid, spot_master_uuid=spot_master_item[ TableSpotMaster.spot_master_uuid ], spot_request_msg_type=SpotRequestMsg.TYPE_SPOT_REQUEST_INITIATED, spot_request_id=spot_instance_request.id )
            spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_SPOT_PRICE ] = str( spot_instance_request.price )
            spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_INSTANCE_USERNAME ] = spot_master_item[ TableSpotMaster.instance_username ]
            spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_ATTEMPT_NUMBER ] = int( failed_spot_request_item[ TableSpotRequest.attempt_number ] + 1 )
            spot_request_sqs_message_durable = SqsMessageDurable( self.spot_request_queue_name, self.region_name, profile_name=self.profile_name )
            spot_request_sqs_message_durable.send_message( spot_request_msg.to_json(), message_attributes=create_microsvc_message_attributes( awsspotbatch.common.const.MICROSVC_REQUEST_CLASSNAME_SpotRequestMessageSpotRequestInitiated ) )
            self.spot_master_sqs_message_durable.delete_message(message)
        # No instances available - resubmit this message with a delay timer so it will get reprocessed in future
        else:
            logger.warning( fmt_master_uuid_msg_hdr( spot_master_uuid ) + 'No spot instances available, will try again in ' + str(awsspotbatch.common.const.NO_SPOT_INSTANCES_AVAILABLE_RECHECK_MINUTES) + ' minutes')
            delay_seconds = awsspotbatch.common.const.NO_SPOT_INSTANCES_AVAILABLE_RECHECK_MINUTES * 60
            self.spot_master_sqs_message_durable.send_message( message.get_body(), message_attributes=create_microsvc_message_attributes( awsspotbatch.common.const.MICROSVC_MASTER_CLASSNAME_SpotMasterMessageResubmitFailedRequest ), delay_seconds=delay_seconds )
            self.spot_master_sqs_message_durable.delete_message(message)
    except StandardError as e:
        # NOTE(review): if the failure happens before spot_master_item is
        # bound, these log calls raise NameError/UnboundLocalError themselves.
        logger.error( fmt_master_item_msg_hdr( spot_master_item ) + str(e) )
        logger.error( fmt_master_item_msg_hdr( spot_master_item ) + traceback.format_exc() )
def test_update_item(self): self.storage_mocker.StubOutWithMock(storage, 'get_item') hash_key = "4.5621201231232132132132132132132142354E126" range_key = "range" storage.get_item( IgnoreArg(), IgnoreArg(), IgnoreArg(), select_type=IgnoreArg(), consistent=IgnoreArg() ).AndReturn( models.SelectResult( items=[ { "hash_key": models.AttributeValue('N', hash_key), "range_key": models.AttributeValue('S', range_key), "attr_value": models.AttributeValue('S', 'val') } ] ) ) self.storage_mocker.StubOutWithMock(storage, 'describe_table') storage.describe_table(IgnoreArg(), 'test_table').AndReturn( models.TableMeta( '00000000-0000-0000-0000-000000000000', models.TableSchema( { 'hash_key': models.AttributeType('N'), 'range_key': models.AttributeType('S') }, ['hash_key', 'range_key'], ), models.TableMeta.TABLE_STATUS_ACTIVE, None ) ) self.storage_mocker.StubOutWithMock(storage, 'update_item') storage.update_item( IgnoreArg(), IgnoreArg(), key_attribute_map=IgnoreArg(), attribute_action_map=IgnoreArg(), expected_condition_map=IgnoreArg()).AndReturn((True, None)) self.storage_mocker.ReplayAll() table = Table('test_table', connection=self.DYNAMODB_CON) item = table.get_item(consistent=False, hash_key=1, range_key="range") item['attr_value'] = 'updated' item.partial_save() self.storage_mocker.VerifyAll()
def ingest_alerts(): alerts_table = Table('mbta_alerts') saFeed = gtfs_realtime_pb2.FeedMessage() saResponse = requests.get('https://cdn.mbta.com/realtime/Alerts.pb') saFeed.ParseFromString(saResponse.content) now_ts = time.time() alerts = [] for entity in saFeed.entity: if entity.HasField('alert'): include_alert = False for informed in entity.alert.informed_entity: if informed.route_type <= 1: # Subway/Green Line include_alert = True break if include_alert: include_alert = False for period in entity.alert.active_period: # Include all future and current alerts if period.end == 0 or now_ts < period.end: include_alert = True break if include_alert: alerts.append(entity) for entity in alerts: id = int(entity.id) alert = entity.alert sorted_active_periods = sorted(entity.alert.active_period, key=lambda period: period.start) current_period = None for period in sorted_active_periods: if now_ts > period.start and (now_ts < period.end or period.end == 0): current_period = period break if current_period == None: continue alert_item = None try: alert_item = alerts_table.get_item(alert_id=id) except exceptions.ItemNotFound: pass if not alert_item or alert_item['start'] != current_period.start: alert_item = Item(alerts_table, data={ 'alert_id': id, 'start': current_period.start, 'end': current_period.end, 'future': (current_period.start > now_ts), }) send_and_save_event(alert_item, alert, current_period) elif alert_item['future'] == True and alert_item['start'] < now_ts: alert_item['future'] = False send_and_save_event(alert_item, alert, current_period)
def delete_address(pool, address): """Mark an EIP as no longer in use""" #Connect to ddb conn = boto.dynamodb2.connect_to_region(options.region) ddb = Table(options.table_name, connection=conn) eip = ddb.get_item(pool=pool, address=address) del eip['stack_id'] del eip['logical_id'] eip.save()
def delete_address(pool, address): """Mark an EIP as no longer in use""" #Connect to ddb conn = boto.dynamodb2.connect_to_region(options.region) ddb = Table(options.table_name, connection=conn) eip = ddb.get_item(pool=pool, address=address) del eip['stack_id'] del eip['logical_id'] eip.save()
def editendpoint(): ename = request.form['entityname'] uuid = request.form['uuid'] texttochange = request.form['value'] fieldnumber = request.form['fieldnumber'] entitytable = Table(ename, connection=conn) entityinstance = entitytable.get_item(uuid=uuid) entityinstance['fieldname' + fieldnumber] = texttochange entityinstance.save() return entityinstance['fieldname' + fieldnumber]
def delete_item(target_table, target_item): try: table = Table(target_table) to_del = table.get_item(CUID = target_item) to_del.delete() time.sleep(0.5) return except IOError: print "Error deleting item" return
def updateitem(): users = Table('items') curitem = users.get_item(rfid='165', pname='Toothpaste') curitem['tray_status'] = '0' #johndoe['whatever'] = "man, that's just like your opinion" del curitem['tray_Status'] # Affects all fields, even the ones not changed locally. curitem.save()
def keyExists(tableName, region, id): x = True if(tableExistence(tableName,region) != 0): list_item_table = Table(tableName) try: item = list_item_table.get_item(key_id =id) except boto.dynamodb2.exceptions.ItemNotFound: x = False return x
def process( self, message ) :
    """ Process the message

    Drives the master state machine: loads the master row from DynamoDB,
    records a heartbeat timestamp, dispatches on the current state code,
    and (for non-terminal states) re-queues a CHECK_STATUS message to
    itself with a state-dependent delay.

    :param message: SQS Message instance
    """
    try:
        spot_master_msg = SpotMasterMsg( raw_json=message.get_body() )
        spot_master_uuid = spot_master_msg.spot_master_uuid
        logger.info( fmt_master_uuid_msg_hdr( spot_master_uuid ) + 'process_check_status' )
        # Get master row from DynamoDB and process based on state
        dynamodb_conn = boto.dynamodb2.connect_to_region( self.region_name, profile_name=self.profile_name )
        spot_master_table = Table( self.spot_master_table_name, connection=dynamodb_conn )
        spot_master_item = spot_master_table.get_item( spot_master_uuid=spot_master_uuid )
        logger.info( fmt_master_uuid_msg_hdr( spot_master_uuid ) + 'master state=' + spot_master_item[TableSpotMaster.spot_master_state_code])
        # Defaults: poll again in 60s unless a faster-moving state overrides below
        next_status_msg_delay_secs = 60
        is_send_master_msg_check_status = True
        master_state_code = spot_master_item[TableSpotMaster.spot_master_state_code]
        # Heartbeat: persist the time of this status check.
        # NOTE(review): time.time() is evaluated twice, so the in-memory item
        # and the persisted value can differ by a second -- confirm harmless.
        spot_master_item[ TableSpotMaster.ts_last_state_check ] = int( time.time() )
        spot_master_row_partial_save( self.spot_master_table_name, spot_master_item, {TableSpotMaster.ts_last_state_check:int( time.time() )}, region_name=self.region_name, profile_name=self.profile_name )
        # Process based on the current Master State
        if SpotMasterStateCode.master_resources_in_progress == master_state_code:
            self.handle_state_master_resources_in_progress( spot_master_item )
            next_status_msg_delay_secs = 5
        elif SpotMasterStateCode.master_role_policy_in_progress == master_state_code:
            self.handle_state_master_role_policy_in_progress( spot_master_item, dynamodb_conn )
            next_status_msg_delay_secs = 5
        elif SpotMasterStateCode.waiting_for_instances_complete == master_state_code:
            self.handle_state_waiting_for_instances_complete( spot_master_item )
        elif SpotMasterStateCode.waiting_for_instances_terminated == master_state_code:
            self.handle_state_waiting_for_instances_terminated( spot_master_item )
        elif SpotMasterStateCode.waiting_for_master_resources_terminated == master_state_code:
            self.handle_state_waiting_for_master_resources_terminated( spot_master_item )
            next_status_msg_delay_secs = 5
        elif SpotMasterStateCode.cleanup_in_progress == master_state_code:
            self.handle_state_cleanup_in_progress( spot_master_item )
        elif SpotMasterStateCode.cleanup_complete == master_state_code:
            # Terminal state: stop the self-perpetuating status-check loop
            self.handle_state_cleanup_complete( spot_master_item )
            is_send_master_msg_check_status = False
        self.spot_master_sqs_message_durable.delete_message(message)
        if is_send_master_msg_check_status:
            # Re-queue a CHECK_STATUS message to keep polling this master
            spot_master_msg_check_status = SpotMasterMsg( spot_master_uuid=spot_master_uuid, spot_master_msg_type=SpotMasterMsg.TYPE_CHECK_STATUS )
            message_attributes = create_microsvc_message_attributes( awsspotbatch.common.const.MICROSVC_MASTER_CLASSNAME_SpotMasterMessageCheckStatus )
            self.spot_master_sqs_message_durable.send_message( spot_master_msg_check_status.to_json(), delay_seconds=next_status_msg_delay_secs, message_attributes=message_attributes )
    except StandardError as e:
        # StandardError is Python 2 only.  NOTE(review): spot_master_uuid is
        # unbound here if SpotMasterMsg parsing itself raised -- confirm.
        logger.error( fmt_master_uuid_msg_hdr( spot_master_uuid ) + str(e) )
        logger.error( fmt_master_uuid_msg_hdr( spot_master_uuid ) + traceback.format_exc() )
def test_update_item(self): self.storage_mocker.StubOutWithMock(storage, 'select_item') hash_key = "4.5621201231232132132132132132132142354E126" range_key = "range" storage.select_item( IgnoreArg(), IgnoreArg(), IgnoreArg(), select_type=IgnoreArg(), limit=IgnoreArg(), consistent=IgnoreArg() ).AndReturn( models.SelectResult( items=[ { "hash_key": models.AttributeValue('N', hash_key), "range_key": models.AttributeValue('S', range_key), "attr_value": models.AttributeValue('S', 'val') } ] ) ) self.storage_mocker.StubOutWithMock(storage, 'describe_table') storage.describe_table(IgnoreArg(), 'test_table').AndReturn( models.TableMeta( models.TableSchema( { 'hash_key': models.AttributeType('N'), 'range_key': models.AttributeType('S') }, ['hash_key', 'range_key'], ), models.TableMeta.TABLE_STATUS_ACTIVE ) ) self.storage_mocker.StubOutWithMock(storage, 'update_item') storage.update_item( IgnoreArg(), IgnoreArg(), key_attribute_map=IgnoreArg(), attribute_action_map=IgnoreArg(), expected_condition_map=IgnoreArg()).AndReturn((True, None)) self.storage_mocker.ReplayAll() table = Table('test_table', connection=self.DYNAMODB_CON) item = table.get_item(consistent=False, hash_key=1, range_key="range") item['attr_value'] = 'updated' item.partial_save() self.storage_mocker.VerifyAll()
def getItem(tableName, region,keyVal): if(tableExistence(tableName,region) != 0): list_item_table = Table(tableName) try: item = list_item_table.get_item(key_id=keyVal) return item except boto.dynamodb2.exceptions.ItemNotFound: return None else: print "Table does not exist -- ", tableName
class AwsJobFactory(JobFactory): ''' AWS specific implementation of the JobFactory ''' def __init__(self, config, strategy_factory): ''' Constructor. @param config Configuration settings. Requires the following definitions: Section: database Key: jobs_table Type: string Desc: Name of the NoSQL table containing the job records @paramType ConfigParser @param strategy_factory Interface for marshalling polygon strategies @paramType PolygonStrategyFactory @returns n/a ''' assert config is not None assert strategy_factory is not None self.jobs = Table(config.get('database', 'jobs_table')) self.strategy_factory = strategy_factory def create_job(self, task, polygon_strategy, num_sub_areas): ''' {@inheritDocs} ''' assert task is not None assert num_sub_areas > 0, num_sub_areas job_id = str(uuid4()) result = self.jobs.put_item(data={ 'id' : job_id, 'is_finished' : False, 'num_sub_areas' : num_sub_areas, 'polygon_strategy' : json.dumps(polygon_strategy.to_dict()), 'results' : '[]', 'run_times' : '{}', 'task' : task }) if result is False: raise CreateError("Failed to create job(%s)!" % job_id) return job_id def get_job(self, job_id): ''' {@inheritDocs} ''' record = self.jobs.get_item(id = str(job_id)) if record is None: raise ReadError("Job(%s) does not exist!" % job_id) polygon_strategy = self.strategy_factory.from_dict(json.loads(record['polygon_strategy'])) return AwsJob(record, polygon_strategy)
def getSecret(name, version="", region="us-east-1", table="credential-store", context=None): ''' fetch and decrypt the secret called `name` ''' if not context: context = {} secretStore = Table(table, connection=boto.dynamodb2.connect_to_region(region)) if version == "": # do a consistent fetch of the credential with the highest version result_set = [ x for x in secretStore.query_2( limit=1, reverse=True, consistent=True, name__eq=name) ] if not result_set: raise ItemNotFound("Item {'name': '%s'} couldn't be found." % name) material = result_set[0] else: material = secretStore.get_item(name=name, version=version) kms = boto3.client('kms', region_name=region) # Check the HMAC before we decrypt to verify ciphertext integrity try: kms_response = kms.decrypt(CiphertextBlob=b64decode(material['key']), EncryptionContext=context) except boto.kms.exceptions.InvalidCiphertextException: if context is None: msg = ("Could not decrypt hmac key with KMS. The credential may " "require that an encryption context be provided to decrypt " "it.") else: msg = ("Could not decrypt hmac key with KMS. The encryption " "context provided may not match the one used when the " "credential was stored.") raise KmsError(msg) except Exception as e: raise KmsError("Decryption error %s" % e) key = kms_response['Plaintext'][:32] hmac_key = kms_response['Plaintext'][32:] hmac = HMAC(hmac_key, msg=b64decode(material['contents']), digestmod=SHA256) if hmac.hexdigest() != material['hmac']: raise IntegrityError("Computed HMAC on %s does not match stored HMAC" % name) dec_ctr = Counter.new(128) decryptor = AES.new(key, AES.MODE_CTR, counter=dec_ctr) plaintext = decryptor.decrypt(b64decode( material['contents'])).decode("utf-8") return plaintext
def verify_pwd_shelter(email, pwd, dynamo): user_table = Table('Shelter_Table',connection=dynamo) user_data_exist = user_table.has_item(UserID=md5(email)) if user_data_exist: user_data = user_table.get_item(UserID=md5(email)) else: return None if user_data["Password"] == md5(pwd): return user_data['UserID'] else: return None
def get_user_updates(username, object_uuid, table_name): conn = connect_to_dynamo() try: table = Table(table_name=get_table_name(table_name), connection=conn) except JSONResponseError as e: logger.exception("Table %s returned JSONResponse Error" % table_name) raise e try: res = table.get_item(parent_object=object_uuid, user=username) except ItemNotFound: return {} return dict(res)
class TaskOperation(object): def __init__(self, user_id, event_id): self.__user_id = user_id self.__event_id = event_id self.table = Table('Event') self.event_item = self.table.get_item(event_id=event_id) self.db_event = EventDBOperation() def invite_members_to_task(self, members): current_members = self.event_item['info']['members'] member_status = self.event_item['info']['member_status'] temp = member_status.copy() temp.update(dict.fromkeys(members, 0)) new_members = current_members + members task_name = self.event_item['info']['task_name'] self.event_item['info']['members'] = new_members self.event_item['info']['member_status'] = temp self.event_item.partial_save() self.db_event.bulk_create_new_event_per_user(self.__event_id, EVENT_TYPE, members) return task_name def accept_task(self, message=None): self.event_item['info']['member_status'][self.__user_id] = 1 self.event_item.partial_save() if message: self.db_event.save_comment_by_event_id(self.__event_id, message) self.db_event.update_event_to_new(self.__event_id) def reject_task(self, message=None): self.event_item['info']['member_status'][self.__user_id] = 2 members = self.event_item['info']['members'] if self.__user_id in members: members.remove(self.__user_id) self.event_item['info']['members'] = members self.event_item.partial_save() self.db_event.remove_event_per_user(self.__event_id, self.__user_id) if message: self.db_event.save_comment_by_event_id(self.__event_id, message) self.db_event.update_event_to_new(self.__event_id) def exit_task(self, message=None): self.event_item['info']['member_status'][self.__user_id] = 3 members = self.event_item['info']['members'] if self.__user_id in members: members.remove(self.__user_id) self.event_item['info']['members'] = members self.event_item.partial_save() self.db_event.remove_event_per_user(self.__event_id, self.__user_id) if message: self.db_event.save_comment_by_event_id(self.__event_id, message) self.db_event.update_event_to_new(self.__event_id)
def increaseInstances(emr_conn, jobflowId, numInstances, dynamodb_conn, tblName): table = Table(tblName) ig = emr_conn.list_instance_groups(jobflowId) if str(ig.__dict__['instancegroups'] [0].__dict__['instancegrouptype']) == 'CORE': igCoreId = str(ig.__dict__['instancegroups'][0].__dict__['id']) else: igCoreId = str(ig.__dict__['instancegroups'][1].__dict__['id']) emr_conn.modify_instance_groups(igCoreId, numInstances) jobflowInfo = table.get_item(jobid=1) jobflowInfo['numinstances'] = numInstances jobflowInfo.save(overwrite=True)
def test_get_item(self): self.storage_mocker.StubOutWithMock(storage, 'select_item') blob_data1 = bytes(bytearray([1, 2, 3, 4, 5])) blob_data2 = bytes(bytearray([5, 4, 3, 2, 1])) hash_key = "4.5621201231232132132132132132132142354E126" range_key = "range" storage.select_item( IgnoreArg(), IgnoreArg(), IgnoreArg(), select_type=IgnoreArg(), limit=IgnoreArg(), consistent=IgnoreArg() ).AndReturn( models.SelectResult(items=[{ "hash_key": models.AttributeValue(models.ATTRIBUTE_TYPE_NUMBER, decimal.Decimal(hash_key)), "range_key": models.AttributeValue(models.ATTRIBUTE_TYPE_STRING, range_key), "value_blob": models.AttributeValue(models.ATTRIBUTE_TYPE_BLOB, blob_data1), "value_blob_set": models.AttributeValue(models.ATTRIBUTE_TYPE_BLOB_SET, set([blob_data1, blob_data2])) }])) self.storage_mocker.ReplayAll() table = Table('test_table', connection=self.DYNAMODB_CON) item = table.get_item(consistent=False, hash_key=1, range_key="range") expected_item = { "hash_key": decimal.Decimal(hash_key), "range_key": range_key, "value_blob": types.Binary(blob_data1), "value_blob_set": set([types.Binary(blob_data1), types.Binary(blob_data2)]) } self.assertDictEqual(expected_item, dict(item.items())) self.storage_mocker.VerifyAll()
class dynamodb_dao(object): """ Data access object for Dynamo DB tables. """ def __init__(self): """ Initialize the instance that connects to "word_sorted_urls" table in DynamoDB. """ conn = connect_to_region("us-east-1", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY) self.word_sorted_urls = Table("word_sorted_urls", connection=conn) def get_sorted_urls(self, word): """ Given a word, return a list of urls such that the word exists in each url. The urls in the list is sorted from the highest page rank to lowest.""" # Took 0.22 seconds return self.word_sorted_urls.get_item(word=str(word))['sorted_urls']
def get_site_obs(conn, site): obs_table = Table('vertical_positions', connection=conn) item = obs_table.get_item(site=site) item_keys = item.keys() item_values = item.values() item_dict = {} for i in range(0, len(item_keys)): if item_keys[i] != 'site': try: item_dict.update({item_keys[i]: item_values[i][0]['pos']}) except: sleep(3) item_dict.update({item_keys[i]: item_values[i][0]['pos']}) return item_dict
def createNewStreamingJob(dynamodb_conn, configValues): table = Table(configValues['ddbTableNameForState']) emr_conn = conn_to_emr() if str(configValues['vpcSubnetId']) != '': dict_subnet = {"Instances.Ec2SubnetId": configValues['vpcSubnetId']} else: dict_subnet = {} try: jobid = emr_conn.run_jobflow( name=configValues['jobflowName'], log_uri=configValues['logS3Uri'], steps=[], action_on_failure='CANCEL_AND_WAIT', master_instance_type=configValues['masterInstanceType'], slave_instance_type=configValues['slaveInstanceType'], num_instances=int(configValues['numInstances']), ami_version=configValues['amiVersion'], keep_alive=True, enable_debugging=True, job_flow_role=configValues['jobFlowRole'], service_role=configValues['serviceRole'], ec2_keyname=configValues['ec2KeyName'], api_params=dict_subnet, visible_to_all_users=True) except: return 2 emr_conn.set_termination_protection(jobid, True) state = check_cluster_running(emr_conn, jobid) try: jobflowId = table.get_item(jobid=1) jobflowId['jobflowid'] = jobid jobflowId['state'] = state jobflowId['numinstances'] = configValues['numInstances'] jobflowId['terminationprotect'] = configValues['terminationProtect'] jobflowId.save(overwrite=True) except boto.dynamodb2.exceptions.ItemNotFound: try: table.put_item( data={ 'jobid': 1, 'jobflowid': jobid, 'state': state, 'numinstances': configValues['numInstances'], 'terminationprotect': configValues['terminationProtect'] }) return 0 except: return 1 except: return 1
def toggleTermProtect(emr_conn, jobflowId, terminationProtect, dynamodb_conn, tblName): table = Table(tblName) if terminationProtect.upper() == 'TRUE': protection = bool(True) elif terminationProtect.upper() == 'FALSE': protection = bool(False) else: logger.error( 'Termination protection should be either True or False!!!') sys.exit(1) emr_conn.set_termination_protection(jobflowId, protection) jobflowInfo = table.get_item(jobid=1) jobflowInfo['terminationprotect'] = str(terminationProtect) jobflowInfo.save(overwrite=True)