def push_backup(user_email, disk_path):
    """Tar up a user's disk and push the archive to the backup S3 bucket.

    Builds /tmp/<sessname>.tar.gz from the contents of disk_path, skipping
    the managed '.julia' and '.juliabox' dot-folders, and normalizing entry
    ownership inside the archive to uid/gid 1000 ('ubuntu'). If a backup
    bucket is configured, uploads the archive (tagged with its mtime) and
    deletes the local copy on success.

    :param user_email: email used to derive the unique session name.
    :param disk_path: directory whose contents are backed up.
    """
    sessname = unique_sessname(user_email)
    S3Disk.log_info("pushing %s.tar.gz from %s to %s", sessname, disk_path, JBoxVol.BACKUP_BUCKET)
    bkup_file = os.path.join('/tmp', sessname + ".tar.gz")

    def set_perms(tinfo):
        # Entries must be owned by the in-container 'ubuntu' user on restore.
        tinfo.uid = 1000
        tinfo.gid = 1000
        tinfo.uname = 'ubuntu'
        tinfo.gname = 'ubuntu'
        return tinfo

    bkup_tar = tarfile.open(bkup_file, 'w:gz')
    try:
        for f in os.listdir(disk_path):
            # Skip managed dot-folders; everything else is archived under 'juser/'.
            if f.startswith('.') and (f in ['.julia', '.juliabox']):
                continue
            full_path = os.path.join(disk_path, f)
            bkup_tar.add(full_path, os.path.join('juser', f), filter=set_perms)
    finally:
        # Close even when add() raises, so the file handle is not leaked
        # and a partial archive is flushed to disk.
        bkup_tar.close()
    # 0o666 replaces the Py2-only literal 0666 (same mode, valid on Py2.6+/Py3).
    os.chmod(bkup_file, 0o666)

    # Upload to S3 if so configured. Delete from local if successful.
    bkup_file_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(bkup_file), pytz.utc) + \
                      datetime.timedelta(seconds=JBoxVol.LOCAL_TZ_OFFSET)
    if JBoxVol.BACKUP_BUCKET is not None:
        if CloudHost.push_file_to_s3(JBoxVol.BACKUP_BUCKET, bkup_file,
                                     metadata={'backup_time': bkup_file_mtime.isoformat()}) is not None:
            os.remove(bkup_file)
            S3Disk.log_info("Moved backup to S3 " + sessname)
def _backup(self, clear_volume=False):
    """Archive this session's disk to BACKUP_LOC and optionally push to S3.

    Writes <BACKUP_LOC>/<sessname>.tar.gz from self.disk_path, skipping the
    managed '.julia' and '.ipython' dot-folders. If clear_volume is True,
    the disk contents are deleted after the archive is written. If a backup
    bucket is configured, the archive (tagged with its mtime) is uploaded
    and the local copy removed on success.

    :param clear_volume: when True, delete self.disk_path after archiving.
    """
    JBoxVol.log_info("Backing up " + self.sessname + " at " + str(JBoxVol.BACKUP_LOC))
    bkup_file = os.path.join(JBoxVol.BACKUP_LOC, self.sessname + ".tar.gz")
    bkup_tar = tarfile.open(bkup_file, 'w:gz')
    try:
        for f in os.listdir(self.disk_path):
            # Skip managed dot-folders; everything else is archived under 'juser/'.
            if f.startswith('.') and (f in ['.julia', '.ipython']):
                continue
            full_path = os.path.join(self.disk_path, f)
            bkup_tar.add(full_path, os.path.join('juser', f))
    finally:
        # Close even when add() raises, so the file handle is not leaked
        # and a partial archive is flushed to disk.
        bkup_tar.close()
    # 0o666 replaces the Py2-only literal 0666 (same mode, valid on Py2.6+/Py3).
    os.chmod(bkup_file, 0o666)

    if clear_volume:
        ensure_delete(self.disk_path)

    # Upload to S3 if so configured. Delete from local if successful.
    bkup_file_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(bkup_file), pytz.utc) + \
                      datetime.timedelta(seconds=JBoxVol.LOCAL_TZ_OFFSET)
    if JBoxVol.BACKUP_BUCKET is not None:
        if CloudHost.push_file_to_s3(JBoxVol.BACKUP_BUCKET, bkup_file,
                                     metadata={'backup_time': bkup_file_mtime.isoformat()}) is not None:
            os.remove(bkup_file)
            JBoxVol.log_info("Moved backup to S3 " + self.sessname)
def push_backup(user_email, disk_path):
    """Tar up a user's disk and push the archive to the backup S3 bucket.

    Builds /tmp/<sessname>.tar.gz from the contents of disk_path, skipping
    the managed '.julia' and '.juliabox' dot-folders, and normalizing entry
    ownership inside the archive to uid/gid 1000 ('ubuntu'). If a backup
    bucket is configured, uploads the archive (tagged with its mtime) and
    deletes the local copy on success.

    :param user_email: email used to derive the unique session name.
    :param disk_path: directory whose contents are backed up.
    """
    sessname = unique_sessname(user_email)
    S3Disk.log_info("pushing %s.tar.gz from %s to %s", sessname, disk_path, JBoxVol.BACKUP_BUCKET)
    bkup_file = os.path.join('/tmp', sessname + ".tar.gz")

    def set_perms(tinfo):
        # Entries must be owned by the in-container 'ubuntu' user on restore.
        tinfo.uid = 1000
        tinfo.gid = 1000
        tinfo.uname = 'ubuntu'
        tinfo.gname = 'ubuntu'
        return tinfo

    bkup_tar = tarfile.open(bkup_file, 'w:gz')
    try:
        for f in os.listdir(disk_path):
            # Skip managed dot-folders; everything else is archived under 'juser/'.
            if f.startswith('.') and (f in ['.julia', '.juliabox']):
                continue
            full_path = os.path.join(disk_path, f)
            bkup_tar.add(full_path, os.path.join('juser', f), filter=set_perms)
    finally:
        # Close even when add() raises, so the file handle is not leaked
        # and a partial archive is flushed to disk.
        bkup_tar.close()
    # 0o666 replaces the Py2-only literal 0666 (same mode, valid on Py2.6+/Py3).
    os.chmod(bkup_file, 0o666)

    # Upload to S3 if so configured. Delete from local if successful.
    bkup_file_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(bkup_file), pytz.utc) + \
                      datetime.timedelta(seconds=JBoxVol.LOCAL_TZ_OFFSET)
    if JBoxVol.BACKUP_BUCKET is not None:
        if CloudHost.push_file_to_s3(
                JBoxVol.BACKUP_BUCKET, bkup_file,
                metadata={'backup_time': bkup_file_mtime.isoformat()}) is not None:
            os.remove(bkup_file)
            S3Disk.log_info("Moved backup to S3 " + sessname)
def _backup(self, clear_volume=False):
    """Archive this session's disk to BACKUP_LOC and optionally push to S3.

    Writes <BACKUP_LOC>/<sessname>.tar.gz from self.disk_path, skipping the
    managed '.julia' and '.juliabox' dot-folders. If clear_volume is True,
    the disk contents are deleted after the archive is written. If a backup
    bucket is configured, the archive (tagged with its mtime) is uploaded
    and the local copy removed on success.

    :param clear_volume: when True, delete self.disk_path after archiving.
    """
    JBoxVol.log_info("Backing up " + self.sessname + " at " + str(JBoxVol.BACKUP_LOC))
    bkup_file = os.path.join(JBoxVol.BACKUP_LOC, self.sessname + ".tar.gz")
    bkup_tar = tarfile.open(bkup_file, 'w:gz')
    try:
        for f in os.listdir(self.disk_path):
            # Skip managed dot-folders; everything else is archived under 'juser/'.
            if f.startswith('.') and (f in ['.julia', '.juliabox']):
                continue
            full_path = os.path.join(self.disk_path, f)
            bkup_tar.add(full_path, os.path.join('juser', f))
    finally:
        # Close even when add() raises, so the file handle is not leaked
        # and a partial archive is flushed to disk.
        bkup_tar.close()
    # 0o666 replaces the Py2-only literal 0666 (same mode, valid on Py2.6+/Py3).
    os.chmod(bkup_file, 0o666)

    if clear_volume:
        ensure_delete(self.disk_path)

    # Upload to S3 if so configured. Delete from local if successful.
    bkup_file_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(bkup_file), pytz.utc) + \
                      datetime.timedelta(seconds=JBoxVol.LOCAL_TZ_OFFSET)
    if JBoxVol.BACKUP_BUCKET is not None:
        if CloudHost.push_file_to_s3(
                JBoxVol.BACKUP_BUCKET, bkup_file,
                metadata={'backup_time': bkup_file_mtime.isoformat()}) is not None:
            os.remove(bkup_file)
            JBoxVol.log_info("Moved backup to S3 " + self.sessname)
has_autoscale=cloud_cfg['autoscale'], has_route53=cloud_cfg['route53'], has_ebs=cloud_cfg['ebs'], has_ses=cloud_cfg['ses'], scale_up_at_load=cloud_cfg['scale_up_at_load'], scale_up_policy=cloud_cfg['scale_up_policy'], autoscale_group=cloud_cfg['autoscale_group'], route53_domain=cloud_cfg['route53_domain'], region=cloud_cfg['region'], install_id=cloud_cfg['install_id']) VolMgr.configure(dckr, cfg) ts = JBoxVol._get_user_home_timestamp() VolMgr.log_debug("user_home_timestamp: %s", ts.strftime("%Y%m%d_%H%M")) img_dir, img_file = os.path.split(JBoxVol.USER_HOME_IMG) new_img_file_name = 'user_home_' + ts.strftime("%Y%m%d_%H%M") + '.tar.gz' new_img_file = os.path.join(img_dir, new_img_file_name) shutil.copyfile(JBoxVol.USER_HOME_IMG, new_img_file) VolMgr.log_debug("new image file is at : %s", new_img_file) bucket = 'juliabox-user-home-templates' VolMgr.log_debug("pushing new image file to s3 at: %s", bucket) CloudHost.push_file_to_s3(bucket, new_img_file) for cluster in ['JuliaBoxTest', 'JuliaBox']: VolMgr.log_debug("setting image for cluster: %s", cluster) JBoxDynConfig.set_user_home_image(cluster, bucket, new_img_file_name)