Example #1
    def _backup(self, clear_volume=False):
        JBoxVol.log_info("Backing up " + self.sessname + " at " + str(JBoxVol.BACKUP_LOC))

        bkup_file = os.path.join(JBoxVol.BACKUP_LOC, self.sessname + ".tar.gz")
        bkup_tar = tarfile.open(bkup_file, 'w:gz')

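        # archive the home directory contents under a 'juser/' prefix, skipping the .julia and .ipython entries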
        for f in os.listdir(self.disk_path):
            if f.startswith('.') and (f in ['.julia', '.ipython']):
                continue
            full_path = os.path.join(self.disk_path, f)
            bkup_tar.add(full_path, os.path.join('juser', f))
        bkup_tar.close()
        os.chmod(bkup_file, 0o666)

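        # optionally wipe the volume contents now that the backup archive has been written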
        if clear_volume:
            ensure_delete(self.disk_path)

        # Upload to S3 if so configured. Delete from local if successful.
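        # record the archive's modification time (adjusted by the configured local timezone offset) as S3 metadata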
        bkup_file_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(bkup_file), pytz.utc) + \
            datetime.timedelta(seconds=JBoxVol.LOCAL_TZ_OFFSET)
        if JBoxVol.BACKUP_BUCKET is not None:
            if CloudHost.push_file_to_s3(JBoxVol.BACKUP_BUCKET, bkup_file,
                                           metadata={'backup_time': bkup_file_mtime.isoformat()}) is not None:
                os.remove(bkup_file)
                JBoxVol.log_info("Moved backup to S3 " + self.sessname)
Example #2
    def _backup(self, clear_volume=False):
        JBoxVol.log_info("Backing up " + self.sessname + " at " +
                         str(JBoxVol.BACKUP_LOC))

        bkup_file = os.path.join(JBoxVol.BACKUP_LOC, self.sessname + ".tar.gz")
        bkup_tar = tarfile.open(bkup_file, 'w:gz')

        for f in os.listdir(self.disk_path):
            if f.startswith('.') and (f in ['.julia', '.juliabox']):
                continue
            full_path = os.path.join(self.disk_path, f)
            bkup_tar.add(full_path, os.path.join('juser', f))
        bkup_tar.close()
        os.chmod(bkup_file, 0o666)

        if clear_volume:
            ensure_delete(self.disk_path)

        # Upload to S3 if so configured. Delete from local if successful.
        bkup_file_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(bkup_file), pytz.utc) + \
            datetime.timedelta(seconds=JBoxVol.LOCAL_TZ_OFFSET)
        if JBoxVol.BACKUP_BUCKET is not None:
            if CloudHost.push_file_to_s3(
                    JBoxVol.BACKUP_BUCKET,
                    bkup_file,
                    metadata={'backup_time':
                              bkup_file_mtime.isoformat()}) is not None:
                os.remove(bkup_file)
                JBoxVol.log_info("Moved backup to S3 " + self.sessname)
Example #3
 def create_disk():
     disk_id = JBoxContainer.get_unused_disk_id()
     if disk_id < 0:
         raise Exception("No free disk available")
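     # wipe any leftover contents at the chosen mount point, then seed it with the user home template and instance configuration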
     disk_path = os.path.join(JBoxContainer.FS_LOC, str(disk_id))
     ensure_delete(disk_path)
     JBoxContainer.restore_user_home(disk_path)
     JBoxContainer.setup_instance_config(disk_path)
     return disk_id, disk_path
Example #4
 def refresh_disk(self, mark_refreshed=True):
     self.log_debug("blanking out disk at %s", self.disk_path)
     ensure_delete(self.disk_path)
     self.log_debug("restoring common data on disk at %s", self.disk_path)
     self.restore_user_home()
     self.setup_instance_config()
     if mark_refreshed:
         self.mark_refreshed()
     self.log_debug("refreshed disk at %s", self.disk_path)
Example #5
 def refresh_disk(self, mark_refreshed=True):
     self.log_debug("blanking out disk at %s", self.disk_path)
     ensure_delete(self.disk_path)
     self.log_debug("restoring common data on disk at %s", self.disk_path)
     self.restore_user_home(True)
     self.setup_instance_config()
     if mark_refreshed:
         self.mark_refreshed()
     self.log_debug("refreshed disk at %s", self.disk_path)
Example #6
    def backup(self):
        JBoxContainer.log_info("Backing up " + self.debug_str() + " at " + str(JBoxContainer.BACKUP_LOC))
        cname = self.get_name()
        if cname is None:
            return

        bkup_file = os.path.join(JBoxContainer.BACKUP_LOC, cname[1:] + ".tar.gz")

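        # if the container is stopped and an existing backup (on S3 or local) is at least as recent as its last run, skip re-backing up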
        if not self.is_running():
            k = JBoxContainer.pull_from_s3(bkup_file, True)
            bkup_file_mtime = None
            if k is not None:
                bkup_file_mtime = JBoxContainer.parse_iso_time(k.get_metadata('backup_time'))
            elif os.path.exists(bkup_file):
                bkup_file_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(bkup_file), pytz.utc) + \
                                  datetime.timedelta(seconds=JBoxContainer.LOCAL_TZ_OFFSET)

            if bkup_file_mtime is not None:
                tstart = self.time_started()
                tstop = self.time_finished()
                tcomp = tstart if ((tstop is None) or (tstart > tstop)) else tstop
                if tcomp <= bkup_file_mtime:
                    JBoxContainer.log_info("Already backed up " + self.debug_str())
                    return

        disk_ids_used = self.get_disk_ids_used()
        if len(disk_ids_used) == 0:
            JBoxContainer.log_info("No disks to backup")
            return
        if len(disk_ids_used) > 1:
            JBoxContainer.log_info("Cannot back up more than one disk per user yet. Backing up the first disk.")

        disk_id_used = disk_ids_used[0]
        disk_path = os.path.join(JBoxContainer.FS_LOC, str(disk_id_used))
        bkup_tar = tarfile.open(bkup_file, 'w:gz')

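        # archive the disk contents (excluding .julia and .ipython) under a 'juser/' prefix, then blank out the disk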
        for f in os.listdir(disk_path):
            if f.startswith('.') and (f in ['.julia', '.ipython']):
                continue
            full_path = os.path.join(disk_path, f)
            bkup_tar.add(full_path, os.path.join('juser', f))
        bkup_tar.close()
        os.chmod(bkup_file, 0o666)
        ensure_delete(disk_path)

        # Upload to S3 if so configured. Delete from local if successful.
        bkup_file_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(bkup_file), pytz.utc) + datetime.timedelta(
            seconds=JBoxContainer.LOCAL_TZ_OFFSET)
        if JBoxContainer.BACKUP_BUCKET is not None:
            if CloudHelper.push_file_to_s3(JBoxContainer.BACKUP_BUCKET, bkup_file,
                                            metadata={'backup_time': bkup_file_mtime.isoformat()}) is not None:
                os.remove(bkup_file)
                JBoxContainer.log_info("Moved backup to S3 " + self.debug_str())
Example #7
    def restore_user_home(self, new_disk):
        with tarfile.open(JBoxVol.USER_HOME_IMG, 'r:gz') as user_home:
            if new_disk:
                user_home.extractall(self.disk_path)
            else:
                # extract .juliabox, .ipython/README, .ipython/kernels, .ipython/profile_julia, .ipython/profile_default

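                # remove stale copies of the essential paths before extracting fresh ones from the image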
                for path in JBoxVol.USER_HOME_ESSENTIALS:
                    full_path = os.path.join(self.disk_path, path)
                    if os.path.exists(full_path):
                        ensure_delete(full_path, include_itself=True)

                for info in user_home.getmembers():
                    if not JBoxVol._is_path_user_home_essential(info.name):
                        continue
                    user_home.extract(info, self.disk_path)
Example #8
    def restore_user_home(self, new_disk):
        with tarfile.open(JBoxVol.USER_HOME_IMG, 'r:gz') as user_home:
            if new_disk:
                user_home.extractall(self.disk_path)
            else:
                # extract .juliabox, .ipython/README, .ipython/kernels,
                # .ipython/profile_julia, .ipython/profile_default, .ipython/profile_jboxjulia

                for path in JBoxVol.USER_HOME_ESSENTIALS:
                    full_path = os.path.join(self.disk_path, path)
                    if os.path.exists(full_path):
                        ensure_delete(full_path, include_itself=True)

                for info in user_home.getmembers():
                    if not JBoxVol._is_path_user_home_essential(info.name):
                        continue
                    user_home.extract(info, self.disk_path)
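
Note: every example above relies on an ensure_delete helper that is not defined on this page. The sketch below is only an assumption of how such a helper might behave (empty a directory, and remove the path itself when include_itself is passed, as in Examples #7 and #8); the actual JuliaBox implementation may differ.

    import os
    import shutil

    def ensure_delete(path, include_itself=False):
        # Illustrative sketch only; the real helper may handle permissions,
        # symlinks and errors differently.
        if not os.path.exists(path):
            return
        # plain files and symlinks are simply removed
        if os.path.isfile(path) or os.path.islink(path):
            os.remove(path)
            return
        # empty out the directory
        for entry in os.listdir(path):
            full = os.path.join(path, entry)
            if os.path.isdir(full) and not os.path.islink(full):
                shutil.rmtree(full)
            else:
                os.remove(full)
        # optionally remove the (now empty) directory itself
        if include_itself:
            os.rmdir(path)

In Examples #1 through #6 the helper is called without include_itself, which is consistent with the mount point being kept and reused after its contents are cleared.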