def _setup(self):
    # initialize helper classes for interacting with GCE, GCS
    self.auth_http = instance.oauth_authorization(self.config)
    self.gce_helper = gce.Gce(self.auth_http, self.config,
                              project_id=self.config['compute']['project'])
    self.gcs_helper = gcs.Gcs(self.auth_http, self.config,
                              project_id=self.config['compute']['project'])
    self.instance_manager = instance.VMInstanceManager()

    self.instances = []
    self.launched_instances = {}
    self.user_terminated = False

    # get job id
    self.id = gen_job_id()
    self.job_name_root = 'job-%s' % (self.id)
    if self.config['update']:
        self.job_name_root = 'job-updater-%s' % (self.id)

    # setup instance completion api call
    service_not_ready = True
    while service_not_ready:
        try:
            service = discovery.build('storage',
                                      self.config['compute']['api_version'],
                                      http=self.gce_helper.auth_http)
            self.bucket_req = service.objects().list(
                bucket=self.config['compute']['bucket'],
                prefix=self.job_name_root)
            service_not_ready = False
        except Exception as e:
            # Exception already covers ValueError; log the cause before retrying
            logging.info('Connection failed (%s). Retrying...' % (e))
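# _setup retries discovery.build in a tight loop with no delay and no attempt
# cap. Below is a minimal sketch of a bounded variant with exponential backoff;
# the helper name, attempt count, and delays are illustrative assumptions, not
# part of this codebase, and it assumes `time` is imported at module scope.
def _build_storage_service(self, max_attempts=5):
    delay = 1.0
    for attempt in range(max_attempts):
        try:
            return discovery.build('storage',
                                   self.config['compute']['api_version'],
                                   http=self.gce_helper.auth_http)
        except Exception as e:
            logging.info('Connection failed (%s). Retrying in %.1fs...' % (e, delay))
            time.sleep(delay)
            delay *= 2  # double the wait after each failure
    raise RuntimeError('Storage API unreachable after %d attempts' % (max_attempts))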
def stage(self):
    """ Stages changes by creating update disks, if they don't already exist. """
    # setup vars
    compute_config = self.config_['compute']
    created_snapshots = False
    if not self.update_data_disks_:
        self.compute_update_data_disks()

    # authorize access to GCE api
    auth_http = instance.oauth_authorization(self.config_)
    gce_helper = gce.Gce(auth_http, self.config_,
                         project_id=compute_config['project'])

    # for all zones, create a disk snapshot if they don't already exist
    for zone, disk, update_disk_name in zip(compute_config['zones'],
                                            compute_config['data_disks'],
                                            self.update_data_disks_):
        # check for existence of the update disk (taken as a flag for the existence of an update node)
        disk_valid = gce_helper.get_disk(update_disk_name, zone)
        if not disk_valid:
            # create a snapshot of the current disk
            logging.info('Snapshotting disk %s' % (disk))
            snapshot_response = gce_helper.snapshot_disk(
                disk, compute_config['project'], zone)

            # create a disk from the snapshot
            logging.info('Creating update disk %s from snapshot %s' % (
                update_disk_name, snapshot_response['snapshot_name']))
            gce_helper.create_disk(
                update_disk_name, zone=zone,
                size_gb=compute_config['disk_size_gb'],
                source_snapshot=snapshot_response['snapshot_name'])

            # delete the snapshot now that the update disk exists
            gce_helper.delete_snapshot(
                snapshot_name=snapshot_response['snapshot_name'],
                project=compute_config['project'])
            created_snapshots = True
    return created_snapshots
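# A sketch of how stage() and push() are meant to compose into one update
# cycle. The free function and the run-update step are hypothetical
# placeholders for illustration; only stage() and push() come from this file.
def run_update_cycle(updater):
    # create update disks from snapshots of the master disks (no-op if already staged)
    updater.stage()
    # ... hypothetical: launch update nodes that modify the update disks ...
    # swap the update disks in, keeping a timestamped backup of each master
    return updater.push()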
def push(self):
    """ Pushes changes by replacing original disks with update disks. Super critical section. """
    # setup vars
    compute_config = self.config_['compute']
    dt_now = dt.datetime.now()
    if not self.update_data_disks_:
        self.compute_update_data_disks()

    # authorize access to GCE api
    auth_http = instance.oauth_authorization(self.config_)
    gce_helper = gce.Gce(auth_http, self.config_,
                         project_id=compute_config['project'])

    for zone, disk, update_disk in zip(compute_config['zones'],
                                       compute_config['data_disks'],
                                       self.update_data_disks_):
        # check for update disk existence
        disk_response = gce_helper.get_disk(update_disk, zone)
        if not disk_response:
            logging.error('Update disk %s does not exist' % (update_disk))
            continue

        # generate backup disk name from the current timestamp
        backup_disk = '%s-backup-%s-%s-%s-%sh-%sm-%ss' % (
            disk, dt_now.month, dt_now.day, dt_now.year,
            dt_now.hour, dt_now.minute, dt_now.second)

        # snapshot the updated data disk
        snapshot_response = gce_helper.snapshot_disk(
            update_disk, compute_config['project'], zone)

        # delete previous disk and replace, if not in use
        disk_response = gce_helper.get_disk(disk, zone)
        if disk_response:
            if not disk_response.get(USERS_KEY):
                # master disk not in use: replace it with a disk from the snapshot
                gce_helper.delete_disk(disk)
                gce_helper.create_disk(
                    disk, zone=zone,
                    size_gb=compute_config['disk_size_gb'],
                    source_snapshot=snapshot_response['snapshot_name'])

                # delete update disk (don't delete if push can't be done now,
                # otherwise changes won't be overwritten)
                gce_helper.delete_disk(update_disk)
            else:
                # master disk in use: stage the push for a future time
                logging.info('Master disk in use. Staging backup disk for a future push')
                push_queue_filename = os.path.join(self.cache_dir_, PUSH_QUEUE_FILE)
                with open(push_queue_filename, 'a') as f:
                    f.write(backup_disk + '\n')
        else:
            logging.warning('Master disk was not found')

        # create backup disk from snapshot
        gce_helper.create_disk(
            backup_disk, zone=zone,
            size_gb=compute_config['disk_size_gb'],
            source_snapshot=snapshot_response['snapshot_name'])

        # delete the snapshot
        gce_helper.delete_snapshot(
            snapshot_name=snapshot_response['snapshot_name'],
            project=compute_config['project'])
    return True
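# push() appends backup disk names to PUSH_QUEUE_FILE when the master disk is
# in use, but the drain side is not shown in this section. A minimal sketch is
# below, assuming the one-name-per-line format written above; the helper name
# and replace_master_disk() are hypothetical stand-ins for the real swap logic.
def _drain_push_queue(self):
    push_queue_filename = os.path.join(self.cache_dir_, PUSH_QUEUE_FILE)
    if not os.path.exists(push_queue_filename):
        return
    with open(push_queue_filename, 'r') as f:
        pending_backup_disks = [line.strip() for line in f if line.strip()]
    for backup_disk in pending_backup_disks:
        # hypothetical: retry the master-disk swap from this backup disk
        self.replace_master_disk(backup_disk)
    # clear the queue once every pending push has been retried
    os.remove(push_queue_filename)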