def _create_bucket_configs(self):
    """Upload, for each managed bucket, a config object naming that bucket.

    Writes one JSON object per bucket label into the config bucket,
    mapping ``bucket_name`` to the environment-qualified bucket name.
    """
    for label in ('files', 'storage'):
        payload = json.dumps({"bucket_name": self._format_name(label)})
        S3.upload_string(self._format_name('config'), label, payload,
                         partition=False)
def _create_topics_config(self):
    """Publish the SNS topic ARNs from the CMS stack into the config bucket.

    Builds a ``{"topic": {name: arn}}`` mapping from the stack outputs
    and uploads it as the 'topics' config object.
    """
    label_pairs = (
        ('multipage', 'MultipageSNS'),
        ('migrationdownload', 'MigrationDownloadSNS'),
        ('migrationupload', 'MigrationUploadSNS'),
    )
    topics = {
        "topic": {
            name: self.stack_mgr.stack_data['cms'][arn_label]
            for name, arn_label in label_pairs
        }
    }
    S3.upload_string(self._format_name('config'), 'topics',
                     json.dumps(topics), partition=False)
def _create_config_database(self): conn = boto.rds2.connect_to_region(self.args.region) while True: try: describe = conn.describe_db_instances( self.tmpl_args['rds']['instance_id']) info = describe['DescribeDBInstancesResponse'][ 'DescribeDBInstancesResult']['DBInstances'][0] if info['DBInstanceStatus'] == 'available': # check if we have it in config already try: self.rds_config = json.loads( S3.get_string(self._format_name('config'), 'database')) except boto.exception.S3ResponseError: self.rds_config[ 'database'] = 'postgres://{0}:{1}@{2}:{3}/{4}'.format( self.tmpl_args['rds']['username'], self.tmpl_args['rds']['password'], info['Endpoint']['Address'], '5432', self.tmpl_args['rds']['db_name']) break print 'rds', info['DBInstanceStatus'] except DBInstanceNotFound as e: print 'Cannot find instance', str(e) time.sleep(10) S3.upload_string(self._format_name('config'), 'database', json.dumps(self.rds_config), partition=False)
def _create_queues_config(self):
    """Publish the SQS queue identifiers from the CMS stack to the config bucket.

    Builds a ``{"queue": {name: value}}`` mapping from the stack outputs
    and uploads it as the 'queues' config object.
    """
    label_pairs = (
        ('multipage', 'MultipageSQS'),
        ('migrationdownload', 'MigrationDownloadSQS'),
        ('migrationupload', 'MigrationUploadSQS'),
    )
    queues = {
        "queue": {
            name: self.stack_mgr.stack_data['cms'][arn_label]
            for name, arn_label in label_pairs
        }
    }
    S3.upload_string(self._format_name('config'), 'queues',
                     json.dumps(queues), partition=False)
def _create_config_database(self): conn = boto.rds2.connect_to_region(self.args.region) while True: try: describe = conn.describe_db_instances(self.tmpl_args['rds']['instance_id']) info = describe['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0] if info['DBInstanceStatus'] == 'available': # check if we have it in config already try: self.rds_config = json.loads(S3.get_string(self._format_name('config'), 'database')) except boto.exception.S3ResponseError: self.rds_config['database'] = 'postgres://{0}:{1}@{2}:{3}/{4}'.format( self.tmpl_args['rds']['username'], self.tmpl_args['rds']['password'], info['Endpoint']['Address'], '5432', self.tmpl_args['rds']['db_name'] ) break print 'rds', info['DBInstanceStatus'] except DBInstanceNotFound as e: print 'Cannot find instance', str(e) time.sleep(10) S3.upload_string(self._format_name('config'), 'database', json.dumps(self.rds_config), partition=False)
def _create_cms_config(self):
    """Upload the CMS endpoint details (FQDN and ELB name) to the config bucket."""
    cms_outputs = self.stack_mgr.stack_data['cms']
    body = json.dumps({
        'dns': cms_outputs['CMSFQDN'],
        'name': cms_outputs['CMSLoadBalancerName'],
    })
    S3.upload_string(self._format_name('config'), 'cms', body,
                     partition=False)
def _upload_config_registry(self):
    """Upload each bundled config-registry file to the config bucket.

    Each packaged file is parsed and re-serialised as JSON, which both
    validates the bundled data and normalises its formatting before the
    upload.
    """
    for filename in ['blueprint', 'document', 'jobs', 'admin_rules']:
        path = resource_filename(
            'hermes_cloud', 'data/config_registry/{0}'.format(filename))
        # Fix: the file handle was previously opened without ever being
        # closed; a context manager guarantees it is released.
        with open(path) as handle:
            data = json.loads(handle.read())
        S3.upload_string(self._format_name('config'), filename,
                         json.dumps(data), partition=False)
def _create_topics_config(self):
    """Write the CMS stack's SNS topic ARNs to the 'topics' config object."""
    topic_map = {}
    for name, output_key in (('multipage', 'MultipageSNS'),
                             ('migrationdownload', 'MigrationDownloadSNS'),
                             ('migrationupload', 'MigrationUploadSNS')):
        topic_map[name] = self.stack_mgr.stack_data['cms'][output_key]
    S3.upload_string(self._format_name('config'), 'topics',
                     json.dumps({"topic": topic_map}), partition=False)
def _create_queues_config(self):
    """Write the CMS stack's SQS queue identifiers to the 'queues' config object."""
    queue_map = {}
    for name, output_key in (('multipage', 'MultipageSQS'),
                             ('migrationdownload', 'MigrationDownloadSQS'),
                             ('migrationupload', 'MigrationUploadSQS')):
        queue_map[name] = self.stack_mgr.stack_data['cms'][output_key]
    S3.upload_string(self._format_name('config'), 'queues',
                     json.dumps({"queue": queue_map}), partition=False)
def test_upload_string(arrow_mock):
    """By default, upload_string partitions the key as day/month/year/name."""
    connection = boto.connect_s3()
    connection.create_bucket('source-bucket')
    frozen = arrow.get('2015-01-1')
    arrow_mock.utcnow.return_value = frozen
    as_date = frozen.date()
    expected = '%s/%s/%s/%s' % (as_date.day, as_date.month, as_date.year,
                                'test-name')
    result = S3.upload_string('source-bucket', 'test-name', 'my string')
    assert result == expected
def test_upload_string_no_partition():
    """With partition=False the key name is used verbatim."""
    connection = boto.connect_s3()
    connection.create_bucket('source-bucket')
    result = S3.upload_string('source-bucket', 'test-name', 'my string',
                              partition=False)
    assert result == 'test-name'
def _create_region_config(self):
    """Record the deployment AWS region as the 'region' config object."""
    body = json.dumps({'region': self.args.region})
    S3.upload_string(self._format_name('config'), 'region', body,
                     partition=False)
def _create_cms_config(self):
    """Store the CMS DNS name and load balancer name in the config bucket."""
    stack_outputs = self.stack_mgr.stack_data['cms']
    cms_config = {
        'dns': stack_outputs['CMSFQDN'],
        'name': stack_outputs['CMSLoadBalancerName'],
    }
    S3.upload_string(self._format_name('config'), 'cms',
                     json.dumps(cms_config), partition=False)
def _create_bucket_configs(self):
    """For each managed bucket, upload a config object with its real name."""
    bucket_labels = ('files', 'storage')
    for bucket_label in bucket_labels:
        body = json.dumps({"bucket_name": self._format_name(bucket_label)})
        S3.upload_string(self._format_name('config'), bucket_label, body,
                         partition=False)
def _upload_config_registry(self):
    """Upload the bundled config-registry files to the config bucket.

    Parsing then re-dumping each file validates that the packaged data
    is well-formed JSON before it is uploaded.
    """
    for filename in ['blueprint', 'document', 'jobs', 'admin_rules']:
        path = resource_filename(
            'hermes_cloud', 'data/config_registry/{0}'.format(filename))
        # Fix: open() was called without a matching close(), leaking the
        # file handle; use a with-block so it is always closed.
        with open(path) as handle:
            data = json.loads(handle.read())
        S3.upload_string(self._format_name('config'), filename,
                         json.dumps(data), partition=False)