# Example 1
def main():
    """Create a scheduled snapshot for the TaskDefinition given on argv[1].

    argv[1] must be the id of a TaskDefinition whose task_type is
    'snapshot'. Old snapshots are pruned first (via delete()) so at most
    max_count snapshots exist before a new one is created.
    """
    tid = int(sys.argv[1])
    tdo = TaskDefinition.objects.get(id=tid)
    stype = 'task_scheduler'
    aw = APIWrapper()
    if (tdo.task_type != 'snapshot'):
        logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
        return
    meta = json.loads(tdo.json_meta)
    validate_snap_meta(meta)
    share = Share.objects.get(name=meta['share'])
    # max_count may be stored as a float string (e.g. '4.0'); coerce safely.
    max_count = int(float(meta['max_count']))
    prefix = ('%s_' % meta['prefix'])

    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    t = Task(task_def=tdo, state='started', start=now)

    snap_created = False
    # Assume failure until the API call succeeds.
    t.state = 'error'
    try:
        name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(settings.SNAP_TS_FORMAT)))
        url = ('shares/%s/snapshots/%s' % (share.name, name))
        # only create a new snap if there's no overflow situation. This prevents
        # runaway snapshot creation beyond max_count+1.
        if (delete(aw, share, stype, prefix, max_count)):
            data = {'snap_type': stype,
                    'uvisible': meta['visible'], }
            headers = {'content-type': 'application/json'}
            aw.api_call(url, data=data, calltype='post', headers=headers, save_error=False)
            logger.debug('created snapshot at %s' % url)
            t.state = 'finished'
            snap_created = True
    except Exception as e:  # was Python-2-only 'except Exception, e' syntax
        logger.error('Failed to create snapshot at %s' % url)
        logger.exception(e)
    finally:
        # Persist the Task record; the original never saved it, leaving the
        # Task object (and snap_created) dead.
        t.end = datetime.utcnow().replace(tzinfo=utc)
        t.save()

    # best effort pruning without erroring out. If deletion fails, we'll have
    # max_count+1 snapshots and it would be dealt with on the next round.
    if (snap_created):
        delete(aw, share, stype, prefix, max_count)
# Example 2
def main():
    """Kick off a btrfs scrub for the TaskDefinition given on argv[1].

    argv[1]: TaskDefinition id (task_type must be 'scrub').
    argv[2]: optional crontab window spec; defaults to always-on.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(
        cwindow
    ):  # Performance note: immediately check task execution time/day window range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if tdo.task_type != "scrub":
            return logger.error("task_type(%s) is not scrub." % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        # Don't start a new scrub while the previous task is still in flight.
        if Task.objects.filter(task_def=tdo).exists():
            ll = Task.objects.filter(task_def=tdo).order_by("-id")[0]
            if ll.state != "error" and ll.state != "finished":
                logger.debug("Non terminal state(%s) for task(%d). Checking " "again." % (ll.state, tid))
                cur_state = update_state(ll, meta["pool"], aw)
                if cur_state != "error" and cur_state != "finished":
                    return logger.debug(
                        "Non terminal state(%s) for task(%d). " "A new task will not be run." % (cur_state, tid)
                    )

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state="started", start=now)
        url = "pools/%s/scrub" % meta["pool"]
        try:
            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("Started scrub at %s" % url)
            t.state = "running"
        except Exception as e:  # was Python-2-only 'except Exception, e' syntax
            logger.error("Failed to start scrub at %s" % url)
            t.state = "error"
            logger.exception(e)
        finally:
            # Persist the Task whether or not the API call succeeded; the
            # original block ended with a dangling 'finally:' (SyntaxError).
            t.save()
def main():
    """Prune old scheduled snapshots for the TaskDefinition on argv[1].

    Keeps at most max_count snapshots (by descending id) whose names start
    with the configured prefix; deletes the rest via the local API.
    """
    tid = int(sys.argv[1])
    tdo = TaskDefinition.objects.get(id=tid)
    stype = 'task_scheduler'
    aw = APIWrapper()
    if (tdo.task_type != 'snapshot'):
        logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
        return
    meta = json.loads(tdo.json_meta)
    validate_snap_meta(meta)

    # max_count may be stored as a float string (e.g. '4.0'); coerce safely.
    max_count = int(float(meta['max_count']))
    share = Share.objects.get(name=meta['share'])
    prefix = ('%s_' % meta['prefix'])
    snapshots = Snapshot.objects.filter(share=share, snap_type=stype,
                                        name__startswith=prefix).order_by('-id')
    if (len(snapshots) > max_count):
        # Newest max_count snapshots survive; everything older is deleted.
        for snap in snapshots[max_count:]:
            url = ('shares/%s/snapshots/%s' % (meta['share'], snap.name))
            try:
                aw.api_call(url, data=None, calltype='delete', save_error=False)
                logger.debug('deleted old snapshot at %s' % url)
            except Exception as e:  # was Python-2-only 'except Exception, e'
                logger.error('Failed to delete old snapshot at %s' % url)
                logger.exception(e)
                # Stop on first failure; remaining prunes retried next run.
                return
# Example 4
def main():
    """Prune old scheduled snapshots for the TaskDefinition on argv[1].

    Keeps at most max_count snapshots (by descending id) whose names start
    with the configured prefix; deletes the rest via the local API.
    """
    tid = int(sys.argv[1])
    tdo = TaskDefinition.objects.get(id=tid)
    stype = 'task_scheduler'
    aw = APIWrapper()
    if (tdo.task_type != 'snapshot'):
        logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
        return
    meta = json.loads(tdo.json_meta)
    validate_snap_meta(meta)

    # max_count may be stored as a float string (e.g. '4.0'); coerce safely.
    max_count = int(float(meta['max_count']))
    share = Share.objects.get(name=meta['share'])
    prefix = ('%s_' % meta['prefix'])
    snapshots = Snapshot.objects.filter(
        share=share, snap_type=stype, name__startswith=prefix).order_by('-id')
    if (len(snapshots) > max_count):
        # Newest max_count snapshots survive; everything older is deleted.
        for snap in snapshots[max_count:]:
            url = ('shares/%s/snapshots/%s' % (meta['share'], snap.name))
            try:
                aw.api_call(url,
                            data=None,
                            calltype='delete',
                            save_error=False)
                logger.debug('deleted old snapshot at %s' % url)
            except Exception as e:  # was Python-2-only 'except Exception, e'
                logger.error('Failed to delete old snapshot at %s' % url)
                logger.exception(e)
                # Stop on first failure; remaining prunes retried next run.
                return
# Example 5
def main():
    """Kick off a btrfs scrub for the TaskDefinition given on argv[1].

    argv[1]: TaskDefinition id (task_type must be 'scrub').
    argv[2]: optional crontab window spec; defaults to always-on.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)): #Performance note: immediately check task execution time/day window range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        # Don't start a new scrub while the previous task is still in flight.
        if (Task.objects.filter(task_def=tdo).exists()):
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if (ll.state != 'error' and ll.state != 'finished'):
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                cur_state = update_state(ll, meta['pool'], aw)
                if (cur_state != 'error' and cur_state != 'finished'):
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' % (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:  # was Python-2-only 'except Exception, e' syntax
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            # Persist the Task whether or not the API call succeeded; the
            # original block ended with a dangling 'finally:' (SyntaxError).
            t.save()
# Example 6
def main():
    """Create a scheduled snapshot for the TaskDefinition given on argv[1].

    argv[1]: TaskDefinition id (task_type must be 'snapshot').
    argv[2]: optional crontab window spec; defaults to always-on.
    """
    task_id = int(sys.argv[1])
    window = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    # Performance note: immediately check task execution time/day window
    # range to avoid other calls
    if not crontabwindow.crontab_range(window):
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
        return

    task_def = TaskDefinition.objects.get(id=task_id)
    snap_type = 'task_scheduler'
    api = APIWrapper()
    if task_def.task_type != 'snapshot':
        logger.error('task_type(%s) is not snapshot.' % task_def.task_type)
        return
    meta = json.loads(task_def.json_meta)
    validate_snap_meta(meta)
    share = Share.objects.get(id=meta['share'])
    max_count = int(float(meta['max_count']))
    prefix = '%s_' % meta['prefix']

    begin = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    task = Task(task_def=task_def, state='started', start=begin)

    created = False
    task.state = 'error'  # pessimistic default; flipped on success below
    try:
        timestamp = datetime.now().strftime(settings.SNAP_TS_FORMAT)
        name = '%s_%s' % (meta['prefix'], timestamp)
        url = 'shares/{}/snapshots/{}'.format(share.id, name)
        # only create a new snap if there's no overflow situation. This
        # prevents runaway snapshot creation beyond max_count+1.
        if delete(api, share, snap_type, prefix, max_count):
            payload = {
                'snap_type': snap_type,
                'uvisible': meta['visible'],
                'writable': meta['writable'],
            }
            api.api_call(url,
                         data=payload,
                         calltype='post',
                         headers={'content-type': 'application/json'},
                         save_error=False)
            logger.debug('created snapshot at %s' % url)
            task.state = 'finished'
            created = True
    except Exception as e:
        logger.error('Failed to create snapshot at %s' % url)
        logger.exception(e)
    finally:
        task.end = datetime.utcnow().replace(tzinfo=utc)
        task.save()

    # best effort pruning without erroring out. If deletion fails, we'll
    # have max_count+1 number of snapshots and it would be dealt with on
    # the next round.
    if created:
        delete(api, share, snap_type, prefix, max_count)
# Example 7
    def on_connect(self, sid, environ):
        """Greet a newly connected pincard-manager client."""
        self.aw = APIWrapper()
        welcome = {
            'key': 'pincardManager:pincardwelcome',
            'data': 'Welcome to Rockstor PincardManager'
        }
        self.emit('pincardwelcome', welcome)
# Example 8
class SysinfoNamespace(RockstorIO):
    """Socket namespace that streams system information to the web UI."""

    # Gate for the background emit loops; cleared on disconnect.
    start = False
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION

    # This function is run once on every connection
    def on_connect(self, sid, environ):
        self.aw = APIWrapper()
        self.emit('connected', {'key': 'sysinfo:connected', 'data': 'connected'})
        self.start = True
        self.spawn(self.update_storage_state, sid)
        self.spawn(self.update_check, sid)
        self.spawn(self.update_rockons, sid)
        self.spawn(self.send_kernel_info, sid)
        self.spawn(self.prune_logs, sid)
        self.spawn(self.send_localtime, sid)
        self.spawn(self.send_uptime, sid)

    # Run on every disconnect
    def on_disconnect(self, sid):
        self.cleanup(sid)
        self.start = False

    def send_uptime(self):
        # Emit system uptime once a minute while the connection is live.
        while self.start:
            self.emit('uptime', {'key': 'sysinfo:uptime', 'data': uptime()})
            gevent.sleep(60)

    def send_localtime(self):
        # Emit server local time every 40 seconds while connected.
        while self.start:
            self.emit('localtime', {'key': 'sysinfo:localtime',
                                    'data': time.strftime('%H:%M (%z %Z)')})
            gevent.sleep(40)

    def send_kernel_info(self):
        # Body indentation normalized (was accidentally double-indented).
        try:
            self.emit('kernel_info', {'key': 'sysinfo:kernel_info',
                                      'data': kernel_info(self.supported_kernel)})
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s' % e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('kernel_error', {
                'key': 'sysinfo:kernel_error', 'data': str(e)})
            self.error('unsupported_kernel', str(e))

    def update_rockons(self):
        # One-shot refresh of Rock-on metadata; failure is logged only.
        try:
            self.aw.api_call('rockons/update', data=None, calltype='post',
                             save_error=False)
        except Exception as e:  # was Python-2-only 'except Exception, e'
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())
# Example 9
    def on_connect(self, sid, environ):
        """Send the pincard-manager welcome message to a new client."""
        self.aw = APIWrapper()
        payload = {
            "key": "pincardManager:pincardwelcome",
            "data": "Welcome to Rockstor PincardManager",
        }
        self.emit("pincardwelcome", payload)
# Example 10
class SysinfoNamespace(BaseNamespace, BroadcastMixin):
    """Socket namespace that streams system information to the web UI."""

    # Gate for the background emit loops; cleared on disconnect.
    start = False
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION

    # Called before the connection is established
    def initialize(self):
        self.aw = APIWrapper()

    # This function is run once on every connection
    def recv_connect(self):
        self.emit("sysinfo:sysinfo", {
            "key": "sysinfo:connected", "data": "connected"
        })
        self.start = True
        gevent.spawn(self.update_storage_state)
        gevent.spawn(self.update_check)
        gevent.spawn(self.update_rockons)
        gevent.spawn(self.send_uptime)
        gevent.spawn(self.send_kernel_info)
        gevent.spawn(self.prune_logs)

    # Run on every disconnect
    def recv_disconnect(self):
        self.start = False
        self.disconnect()

    def send_uptime(self):
        # Emit system uptime once a minute while the connection is live.
        while self.start:
            self.emit('sysinfo:uptime', {
                'data': uptime(), 'key': 'sysinfo:uptime'
            })
            gevent.sleep(60)

    def send_kernel_info(self):
        # Body indentation normalized (was accidentally double-indented).
        try:
            self.emit('sysinfo:kernel_info', {
                'data': kernel_info(self.supported_kernel),
                'key': 'sysinfo:kernel_info'
            })
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s' % e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('sysinfo:kernel_error', {
                'data': str(e),
                'key': 'sysinfo:kernel_error'
            })
            self.error('unsupported_kernel', str(e))

    def update_rockons(self):
        # One-shot refresh of Rock-on metadata; failure is logged only.
        try:
            self.aw.api_call('rockons/update', data=None, calltype='post',
                             save_error=False)
        except Exception as e:  # was Python-2-only 'except Exception, e'
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())
# Example 11
def main():
    """Start a scheduled pool scrub and poll it to completion.

    argv[1]: TaskDefinition id (task_type must be 'scrub').
    argv[2]: optional crontab window spec; defaults to always-on.
    """
    task_id = int(sys.argv[1])
    window = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    # Performance note: immediately check task execution time/day window
    # range to avoid other calls
    if not crontabwindow.crontab_range(window):
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
        return

    task_def = TaskDefinition.objects.get(id=task_id)
    if task_def.task_type != "scrub":
        return logger.error("task_type(%s) is not scrub." % task_def.task_type)
    meta = json.loads(task_def.json_meta)
    api = APIWrapper()

    # Refuse to start when the most recent task is still in flight.
    prior = Task.objects.filter(task_def=task_def).order_by("-id")
    if prior.exists():
        latest = prior[0]
        if latest.state not in TERMINAL_SCRUB_STATES:
            logger.debug("Non terminal state(%s) for task(%d). Checking "
                         "again." % (latest.state, task_id))
            state_now = update_state(latest, meta["pool"], api)
            if state_now not in TERMINAL_SCRUB_STATES:
                return logger.debug("Non terminal state(%s) for task(%d). "
                                    "A new task will not be run." %
                                    (state_now, task_id))

    begin = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    task = Task(task_def=task_def, state="started", start=begin)
    url = "pools/%s/scrub" % meta["pool"]
    try:
        api.api_call(url, data=None, calltype="post", save_error=False)
        logger.debug("Started scrub at %s" % url)
        task.state = "running"
    except Exception as e:
        logger.error("Failed to start scrub at %s" % url)
        task.state = "error"
        logger.exception(e)
    finally:
        task.save()

    # Poll once a minute until the scrub reaches a terminal state.
    while True:
        state_now = update_state(task, meta["pool"], api)
        if state_now in TERMINAL_SCRUB_STATES:
            logger.debug("task(%d) finished with state(%s)." %
                         (task_id, state_now))
            task.end = datetime.utcnow().replace(tzinfo=utc)
            task.save()
            break
        logger.debug("pending state(%s) for scrub task(%d). Will check "
                     "again in 60 seconds." % (state_now, task_id))
        time.sleep(60)
# Example 12
def main():
    """Boot-time bootstrap: scan btrfs devices, then bring up the API."""

    try:
        device_scan()
    except Exception as e:
        print('BTRFS device scan failed due to an exception. This indicates '
              'a serious problem. Aborting. Exception: %s' % e.__str__())
        sys.exit(1)
    print('BTRFS device scan complete')

    # if the appliance is not setup, there's nothing more to do beyond
    # device scan
    setup = Setup.objects.first()
    if setup is None or setup.setup_user is False:
        print('Appliance is not yet setup.')
        return

    attempts = 0
    while True:
        try:
            api = APIWrapper()
            time.sleep(2)
            api.api_call('network')
            api.api_call('commands/bootstrap', calltype='post')
            break
        except Exception as e:
            # Retry on every exception, primarily because of django-oauth
            # related code behaving unpredictably while setting
            # tokens. Retrying is a decent workaround for now(11302015).
            if attempts > 15:
                print('Max attempts(15) reached. Connection errors persist. '
                      'Failed to bootstrap. Error: %s' % e.__str__())
                sys.exit(1)
            print('Exception occured while bootstrapping. This could be '
                  'because rockstor.service is still starting up. will '
                  'wait 2 seconds and try again. Exception: %s' % e.__str__())
            time.sleep(2)
            attempts += 1
    print('Bootstrapping complete')

    # Best-effort qgroup maintenance; failures are reported but non-fatal.
    try:
        print('Running qgroup cleanup. %s' % QGROUP_CLEAN)
        run_command([QGROUP_CLEAN])
    except Exception as e:
        print('Exception while running %s: %s' % (QGROUP_CLEAN, e.__str__()))

    try:
        print('Running qgroup limit maxout. %s' % QGROUP_MAXOUT_LIMIT)
        run_command([QGROUP_MAXOUT_LIMIT])
    except Exception as e:
        print('Exception while running %s: %s' %
              (QGROUP_MAXOUT_LIMIT, e.__str__()))
# Example 13
def main():
    """Start a scheduled pool scrub and poll it until it terminates.

    argv[1]: TaskDefinition id (task_type must be 'scrub').
    argv[2]: optional crontab window spec; defaults to always-on.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        # Don't start a new scrub while the most recent one is still running.
        if (Task.objects.filter(task_def=tdo).exists()):
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                cur_state = update_state(ll, meta['pool'], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' %
                                        (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            # Persist the Task regardless of whether the API call succeeded.
            t.save()

        # Poll once a minute until the scrub reaches a terminal state.
        while True:
            cur_state = update_state(t, meta['pool'], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug('task(%d) finished with state(%s).' %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug('pending state(%s) for scrub task(%d). Will check '
                         'again in 60 seconds.' % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
# Example 14
def main():
    """Start a scheduled pool scrub and poll it until it terminates.

    argv[1]: TaskDefinition id whose task_type must be 'scrub'.
    argv[2]: optional crontab window spec; defaults to always-on.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        # Skip starting a new scrub if the latest task is not yet terminal.
        if (Task.objects.filter(task_def=tdo).exists()):
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                cur_state = update_state(ll, meta['pool'], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' %
                                        (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            # Persist the Task regardless of whether the API call succeeded.
            t.save()

        # Check every 60 seconds until the scrub reaches a terminal state.
        while True:
            cur_state = update_state(t, meta['pool'], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug('task(%d) finished with state(%s).' %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug('pending state(%s) for scrub task(%d). Will check '
                         'again in 60 seconds.' % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
# Example 15
    def on_connect(self, sid, environ):
        """Register a sysinfo client and launch its background feeds."""
        self.aw = APIWrapper()
        self.emit('connected', {'key': 'sysinfo:connected', 'data': 'connected'})
        self.start = True
        # Spawn each periodic feed in its original order.
        for feed in (self.update_storage_state,
                     self.update_check,
                     self.update_rockons,
                     self.send_kernel_info,
                     self.prune_logs,
                     self.send_localtime,
                     self.send_uptime):
            self.spawn(feed, sid)
    def on_connect(self, sid, environ):
        """Register a sysinfo client and launch its background workers."""
        self.aw = APIWrapper()
        self.emit("connected", {"key": "sysinfo:connected", "data": "connected"})
        self.start = True
        # Spawn each periodic worker in its original order.
        workers = (self.update_storage_state,
                   self.update_check,
                   self.yum_updates,
                   self.send_kernel_info,
                   self.prune_logs,
                   self.send_localtime,
                   self.send_uptime,
                   self.send_distroinfo,
                   self.shutdown_status,
                   self.pool_degraded_status,
                   self.pool_dev_stats)
        for worker in workers:
            self.spawn(worker, sid)
# Example 17
class PincardManagerNamespace(RockstorIO):
    """Socket namespace that manages pincard creation for the web UI."""

    def on_connect(self, sid, environ):
        self.aw = APIWrapper()
        self.emit('pincardwelcome', {
            'key': 'pincardManager:pincardwelcome', 'data': 'Welcome to Rockstor PincardManager'
        })

    def on_disconnect(self, sid):
        # Reset all per-connection authentication state.
        self.pins_user_uname = None
        self.pins_user_uid = None
        self.pins_check = None
        self.pass_reset_time = None
        self.otp = 'none'
        self.cleanup(sid)

    def on_generatepincard(self, sid, uid):

        def create_pincard(uid):
            # Runs in a spawned greenlet; failures are logged, not raised.
            try:
                url = 'pincardmanager/create/%s' % uid
                new_pincard = self.aw.api_call(url, data=None, calltype='post',
                                               save_error=False)
                self.emit('newpincard', {'key': 'pincardManager:newpincard',
                                         'data': new_pincard})
            except Exception as e:  # was Python-2-only 'except Exception, e'
                logger.error('Failed to create Pincard with '
                             'exception: %s' % e.__str__())

        self.spawn(create_pincard, sid, uid)
# Example 18
def main():
    """Create a scheduled snapshot for the TaskDefinition given on argv[1].

    argv[1]: TaskDefinition id (task_type must be 'snapshot').
    argv[2]: crontab window spec (required in this variant).
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2]
    if (
            crontabwindow.crontab_range(cwindow)
    ):  #Performance note: immediately check task execution time/day window range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)
        share = Share.objects.get(name=meta['share'])
        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)

        snap_created = False
        # Assume failure until the API call succeeds.
        t.state = 'error'
        try:
            name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(
                settings.SNAP_TS_FORMAT)))
            url = ('shares/%s/snapshots/%s' % (share.name, name))
            #only create a new snap if there's no overflow situation. This prevents
            #runaway snapshot creation beyond max_count+1.
            if (delete(aw, share, stype, prefix, max_count)):
                data = {
                    'snap_type': stype,
                    'uvisible': meta['visible'],
                }
                headers = {'content-type': 'application/json'}
                aw.api_call(url,
                            data=data,
                            calltype='post',
                            headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:  # was Python-2-only 'except Exception, e'
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            # Record completion and persist the Task; the original block
            # ended with a dangling 'finally:' (SyntaxError).
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 snapshots; dealt with on the next round.
        if (snap_created):
            delete(aw, share, stype, prefix, max_count)
    def on_connect(self, sid, environ):
        """Greet a freshly connected pincard-manager client."""
        self.aw = APIWrapper()
        greeting = {'key': 'pincardManager:pincardwelcome',
                    'data': 'Welcome to Rockstor PincardManager'}
        self.emit('pincardwelcome', greeting)
 def get(self, *args, **kwargs):
     """Return the list of Pool names on the remote appliance given by 'auuid'.

     Looks up the Appliance by uuid, queries its 'pools' endpoint over
     https and returns the pool names; errors are routed through
     handle_exception().
     """
     try:
         auuid = self.kwargs.get('auuid', None)
         ao = Appliance.objects.get(uuid=auuid)
         url = ('https://%s:%s' % (ao.ip, ao.mgmt_port))
         aw = APIWrapper(client_id=ao.client_id, client_secret=ao.client_secret, url=url)
         response = aw.api_call('pools')
         res = [p['name'] for p in response['results']]
         return Response(res)
     except Appliance.DoesNotExist:
         msg = ('Remote appliance with the given uuid(%s) does not exist.' %
                auuid)
         handle_exception(Exception(msg), self.request)
     except Exception as e:  # was Python-2-only 'except Exception, e' syntax
         msg = ('Failed to retrieve list of Pools on the remote '
                'appliance(%s). Make sure it is running and try again. '
                'Here is the exact error: %s' % (ao.ip, e.__str__()))
         handle_exception(Exception(msg), self.request)
# Example 21
def main():
    """Exercise share create/resize/fill/delete to stress qgroup accounting.

    argv[1]: name of an existing pool to test against.
    """
    if (len(sys.argv) == 1):
        sys.exit('Usage: %s <pool name>' % sys.argv[0])
    pname = sys.argv[1]
    sname = 'qgroup-test-share1'
    size = 1024 * 1024  #1 GiB
    aw = APIWrapper()
    # Return value unused (was bound to an unused local); raises on failure.
    create_share(aw, sname, pname, size)
    print('Share(%s) created. Size: %d' % (sname, size))

    fill_up_share(pname, sname)
    #expand Share and fillup. repeat 3 times
    for i in range(3):
        size += (1024 * 512)
        resize_share(aw, sname, size)
        fill_up_share(pname, sname)

    #remove random files and fillup. repeat 3 times.
    for i in range(3):
        #expand a bit so we can actually remove some files.
        size += (1024 * 128)
        resize_share(aw, sname, size)
        remove_random_files(pname, sname)
        fill_up_share(pname, sname)

    #remove random files, shrink the pool by half of free'd capacity, fill up. repeat 3 times
    for i in range(3):
        #expand a bit so we can actually remove files.
        size += (1024 * 128)
        resize_share(aw, sname, size)
        remove_random_files(pname, sname)
        so = Share.objects.get(name=sname)
        rusage, eusage = share_usage(so.pool, so.qgroup)
        free_space = so.size - rusage
        print('Free space on Share(%s): %d' % (sname, free_space))
        size -= int(free_space / 2)
        resize_share(aw, sname, size)
        fill_up_share(pname, sname)

    # Return value unused (was bound to an unused local 'res2').
    aw.api_call('shares/%s' % sname,
                calltype='delete',
                save_error=False)
    print('Share(%s) deleted.' % sname)
# Example 22
 def get(self, *args, **kwargs):
     """Return the list of Pool names served by a remote appliance."""
     try:
         uuid_arg = self.kwargs.get("auuid", None)
         remote = Appliance.objects.get(uuid=uuid_arg)
         base_url = "https://%s:%s" % (remote.ip, remote.mgmt_port)
         client = APIWrapper(client_id=remote.client_id,
                             client_secret=remote.client_secret,
                             url=base_url)
         pools = client.api_call("pools")
         names = [entry["name"] for entry in pools["results"]]
         return Response(names)
     except Appliance.DoesNotExist:
         msg = "Remote appliance with the given uuid(%s) does not exist." % uuid_arg
         handle_exception(Exception(msg), self.request)
     except Exception as e:
         msg = ("Failed to retrieve list of Pools on the remote "
                "appliance(%s). Make sure it is running and try again. "
                "Here is the exact error: %s" % (remote.ip, e.__str__()))
         handle_exception(Exception(msg), self.request)
# Example 23
def main():
    """Exercise share qgroup accounting on the given pool: create a test
    share, repeatedly resize/fill it, delete files, shrink, then remove
    the share again via the API."""
    if len(sys.argv) == 1:
        sys.exit('Usage: %s <pool name>' % sys.argv[0])
    pname = sys.argv[1]
    sname = 'qgroup-test-share1'
    size = 1024 * 1024  # 1 GiB expressed in KiB
    aw = APIWrapper()
    res = create_share(aw, sname, pname, size)
    print('Share(%s) created. Size: %d' % (sname, size))

    fill_up_share(pname, sname)

    # Grow the share and fill it to capacity again, three times over.
    for _ in range(3):
        size += 1024 * 512
        resize_share(aw, sname, size)
        fill_up_share(pname, sname)

    # Delete random files and refill, three times over. A small expansion
    # first guarantees there is actually something removable.
    for _ in range(3):
        size += 1024 * 128
        resize_share(aw, sname, size)
        remove_random_files(pname, sname)
        fill_up_share(pname, sname)

    # Delete random files, shrink by half of the freed capacity, refill.
    for _ in range(3):
        size += 1024 * 128
        resize_share(aw, sname, size)
        remove_random_files(pname, sname)
        so = Share.objects.get(name=sname)
        rusage, eusage = share_usage(so.pool, so.qgroup)
        free_space = so.size - rusage
        print('Free space on Share(%s): %d' % (sname, free_space))
        size -= int(free_space / 2)
        resize_share(aw, sname, size)
        fill_up_share(pname, sname)

    res2 = aw.api_call('shares/%s' % sname,
                       calltype='delete',
                       save_error=False)
    print('Share(%s) deleted.' % sname)
    def on_connect(self, sid, environ):
        """Prepare the API wrapper, acknowledge the client, then start
        every periodic reporter greenlet for this connection."""
        self.aw = APIWrapper()
        self.emit('connected', {'key': 'sysinfo:connected',
                                'data': 'connected'})
        self.start = True
        # Spawn one greenlet per reporter; each loops while self.start.
        reporters = (self.update_storage_state,
                     self.update_check,
                     self.yum_updates,
                     self.update_rockons,
                     self.send_kernel_info,
                     self.prune_logs,
                     self.send_localtime,
                     self.send_uptime,
                     self.send_distroinfo,
                     self.shutdown_status,
                     self.pool_degraded_status,
                     self.pool_dev_stats)
        for reporter in reporters:
            self.spawn(reporter, sid)
 def get(self, *args, **kwargs):
     """Return the list of Pool names on a remote appliance.

     Looks up the Appliance identified by the ``auuid`` URL kwarg,
     queries its ``pools`` endpoint over the management port, and
     returns the pool names. All errors are funneled through
     handle_exception().
     """
     # Hoisted out of the try block: .get() on kwargs cannot raise and
     # both error messages below need the uuid.
     auuid = self.kwargs.get('auuid', None)
     ao = None
     try:
         ao = Appliance.objects.get(uuid=auuid)
         url = ('https://%s:%s' % (ao.ip, ao.mgmt_port))
         aw = APIWrapper(client_id=ao.client_id,
                         client_secret=ao.client_secret,
                         url=url)
         response = aw.api_call('pools')
         res = [p['name'] for p in response['results']]
         return Response(res)
     except Appliance.DoesNotExist:
         msg = ('Remote appliance with the given uuid(%s) does not exist.' %
                auuid)
         handle_exception(Exception(msg), self.request)
     # Bug fix: 'except Exception, e' is Python-2-only syntax (a
     # SyntaxError on Python 3); 'as' works on 2.6+ and 3.
     except Exception as e:
         # Bug fix: ``ao`` was unbound here whenever the Appliance lookup
         # itself failed with a non-DoesNotExist error, so the handler
         # raised a NameError of its own. Fall back to the uuid.
         target = ao.ip if ao is not None else auuid
         msg = ('Failed to retrieve list of Pools on the remote '
                'appliance(%s). Make sure it is running and try again. '
                'Here is the exact error: %s' % (target, e.__str__()))
         handle_exception(Exception(msg), self.request)
Beispiel #26
0
    def on_connect(self, sid, environ):
        """Prepare the API wrapper, acknowledge the client, then start
        every periodic reporter greenlet for this connection."""
        self.aw = APIWrapper()
        self.emit('connected', {'key' : 'sysinfo:connected', 'data' : 'connected'})
        self.start = True
        # Spawn one greenlet per reporter; each loops while self.start.
        for reporter in (self.update_storage_state,
                         self.update_check,
                         self.update_rockons,
                         self.send_kernel_info,
                         self.prune_logs,
                         self.send_localtime,
                         self.send_uptime):
            self.spawn(reporter, sid)
Beispiel #27
0
def main():
    """Scan btrfs devices, then bootstrap the appliance via the local
    API, retrying on connection errors while rockstor.service is still
    starting up."""
    aw = APIWrapper()
    device_scan()
    print('BTRFS device scan complete')

    num_attempts = 0
    while True:
        try:
            aw.api_call('network')
            aw.api_call('commands/bootstrap', calltype='post')
            break
        # Bug fix: 'except ..., e' is Python-2-only syntax (a SyntaxError
        # on Python 3); 'as' works on 2.6+ and 3.
        except requests.exceptions.ConnectionError as e:
            if num_attempts > 15:
                print('Max attempts(15) reached. Connection errors persist. '
                      'Failed to bootstrap. Error: %s' % e.__str__())
                sys.exit(1)
            print('Connection error while bootstrapping. This could be because '
                  'rockstor.service is still starting up. will wait 2 seconds '
                  'and try again.')
            time.sleep(2)
            num_attempts += 1
Beispiel #28
0
def main():
    """Scan btrfs devices, and when the appliance has completed initial
    setup, bootstrap it through the local API and run the qgroup
    maintenance helpers (best effort)."""
    try:
        device_scan()
    except Exception as e:
        print(
            "BTRFS device scan failed due to an exception. This indicates "
            "a serious problem. Aborting. Exception: %s" % e.__str__()
        )
        sys.exit(1)
    print("BTRFS device scan complete")

    # Until initial setup has been completed there is nothing more to do
    # beyond the device scan above.
    setup = Setup.objects.first()
    if setup is None or setup.setup_user is False:
        print("Appliance is not yet setup.")
        return

    attempts = 0
    while True:
        try:
            aw = APIWrapper()
            time.sleep(2)
            aw.api_call("network")
            aw.api_call("commands/bootstrap", calltype="post")
            break
        except Exception as e:
            # Retry on every exception, primarily because of django-oauth
            # related code behaving unpredictably while setting
            # tokens. Retrying is a decent workaround for now(11302015).
            if attempts > 15:
                print("Max attempts(15) reached. Connection errors persist. "
                      "Failed to bootstrap. Error: %s" % e.__str__())
                sys.exit(1)
            print("Exception occured while bootstrapping. This could be "
                  "because rockstor.service is still starting up. will "
                  "wait 2 seconds and try again. Exception: %s" % e.__str__())
            time.sleep(2)
            attempts += 1
    print("Bootstrapping complete")

    # The two helpers below are best effort: report failures, don't abort.
    try:
        print("Running qgroup cleanup. %s" % QGROUP_CLEAN)
        run_command([QGROUP_CLEAN])
    except Exception as e:
        print("Exception while running %s: %s" % (QGROUP_CLEAN, e.__str__()))

    try:
        print("Running qgroup limit maxout. %s" % QGROUP_MAXOUT_LIMIT)
        run_command([QGROUP_MAXOUT_LIMIT])
    except Exception as e:
        print("Exception while running %s: %s" % (QGROUP_MAXOUT_LIMIT, e.__str__()))
Beispiel #29
0
    def on_connect(self, sid, environ):
        """Prepare the API wrapper, acknowledge the client, then start
        every periodic reporter greenlet for this connection."""
        self.aw = APIWrapper()
        self.emit('connected',
                  {'key': 'sysinfo:connected', 'data': 'connected'})
        self.start = True
        # Spawn one greenlet per reporter; each loops while self.start.
        for reporter in (self.update_storage_state,
                         self.update_check,
                         self.yum_updates,
                         self.update_rockons,
                         self.send_kernel_info,
                         self.prune_logs,
                         self.send_localtime,
                         self.send_uptime,
                         self.send_distroinfo,
                         self.shutdown_status,
                         self.pool_degraded_status,
                         self.pool_dev_stats):
            self.spawn(reporter, sid)
Beispiel #30
0
class PincardManagerNamespace(RockstorIO):
    """Socket.io namespace that creates Pincards for users on request."""

    def on_connect(self, sid, environ):
        # Prepare the API wrapper used by the pincard endpoints and greet
        # the client.
        self.aw = APIWrapper()
        self.emit(
            'pincardwelcome', {
                'key': 'pincardManager:pincardwelcome',
                'data': 'Welcome to Rockstor PincardManager'
            })

    def on_disconnect(self, sid):
        # Drop all per-session password-reset state before cleanup.
        self.pins_user_uname = None
        self.pins_user_uid = None
        self.pins_check = None
        self.pass_reset_time = None
        self.otp = 'none'
        self.cleanup(sid)

    def on_generatepincard(self, sid, uid):
        def create_pincard(uid):
            # Ask the backend to create a pincard for uid and push the
            # result back to the client.
            try:
                url = 'pincardmanager/create/%s' % uid
                new_pincard = self.aw.api_call(url,
                                               data=None,
                                               calltype='post',
                                               save_error=False)
                self.emit('newpincard', {
                    'key': 'pincardManager:newpincard',
                    'data': new_pincard
                })
            # Bug fix: 'except Exception, e' is Python-2-only syntax (a
            # SyntaxError on Python 3); 'as' works on 2.6+ and 3.
            except Exception as e:
                logger.error('Failed to create Pincard with '
                             'exception: %s' % e.__str__())

        self.spawn(create_pincard, sid, uid)
Beispiel #31
0
def main():
    """Cron entry point scheduling a system reboot/shutdown/suspend task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-style
    execution window (defaults to always). Outside the window nothing
    runs. On success a Task row is saved as 'finished', on failure as
    'failed'.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        aw = APIWrapper()
        if tdo.task_type not in ["reboot", "shutdown", "suspend"]:
            logger.error(
                "task_type(%s) is not a system reboot, "
                "shutdown or suspend." % tdo.task_type
            )
            return
        meta = json.loads(tdo.json_meta)
        validate_shutdown_meta(meta)

        # Additional user-configured run conditions (e.g. activity checks)
        # can veto execution even inside the window.
        if not run_conditions_met(meta):
            logger.debug(
                "Cron scheduled task not executed because the run conditions have not been met"
            )
            return

        # The task row is created as 'scheduled' with a 3-minute horizon;
        # its final state is decided in the try/except below.
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        schedule = now + timedelta(minutes=3)
        t = Task(task_def=tdo, state="scheduled", start=now, end=schedule)

        try:
            # set default command url before checking if it's a shutdown
            # and if we have an rtc wake up
            url = "commands/%s" % tdo.task_type

            # if task_type is shutdown and rtc wake up true
            # parse crontab hour & minute vs rtc hour & minute to state
            # if wake will occur same day or next day, finally update
            # command url adding wake up epoch time
            if tdo.task_type in ["shutdown", "suspend"] and meta["wakeup"]:
                crontab_fields = tdo.crontab.split()
                crontab_time = int(crontab_fields[1]) * 60 + int(crontab_fields[0])
                wakeup_time = meta["rtc_hour"] * 60 + meta["rtc_minute"]
                # rtc wake up requires UTC epoch, but users on WebUI set time
                # thinking to localtime, so first we set wake up time,
                # update it if wake up is on next day, finally move it to UTC
                # and get its epoch
                epoch = datetime.now().replace(
                    hour=int(meta["rtc_hour"]),
                    minute=int(meta["rtc_minute"]),
                    second=0,
                    microsecond=0,
                )
                # if wake up < crontab time wake up will run next day
                if crontab_time > wakeup_time:
                    epoch += timedelta(days=1)

                # NOTE(review): strftime("%s") is a platform-dependent (glibc)
                # extension yielding the epoch seconds — confirm on target OS.
                epoch = epoch.strftime("%s")
                url = "%s/%s" % (url, epoch)

            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("System %s scheduled" % tdo.task_type)
            t.state = "finished"

        except Exception as e:
            t.state = "failed"
            logger.error("Failed to schedule system %s" % tdo.task_type)
            logger.exception(e)

        finally:
            # t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

    else:
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
Beispiel #32
0
def main():
    """Cron entry point scheduling a system reboot/shutdown/suspend task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-style
    execution window (defaults to always). Outside the window nothing
    runs. On success a Task row is saved as 'finished', on failure as
    'failed'.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    # Performance note: check the execution time/day window first to
    # avoid all other calls when outside it.
    if not crontabwindow.crontab_range(cwindow):
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
        return

    tdo = TaskDefinition.objects.get(id=tid)
    aw = APIWrapper()
    if tdo.task_type not in ['reboot', 'shutdown', 'suspend']:
        logger.error('task_type(%s) is not a system reboot, '
                     'shutdown or suspend.' % tdo.task_type)
        return
    meta = json.loads(tdo.json_meta)
    validate_shutdown_meta(meta)

    # Record the task as 'scheduled' with a 3-minute horizon; the final
    # state is decided below.
    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    task = Task(task_def=tdo, state='scheduled', start=now,
                end=now + timedelta(minutes=3))

    try:
        # Default command url; extended below when an rtc wake up is set.
        url = ('commands/%s' % tdo.task_type)

        # For shutdown/suspend with wake up enabled, compare crontab
        # hour/minute against rtc hour/minute to decide whether the wake
        # up lands on the same or the next day, then append its epoch.
        if tdo.task_type in ['shutdown', 'suspend'] and meta['wakeup']:
            fields = tdo.crontab.split()
            crontab_time = int(fields[1]) * 60 + int(fields[0])
            wakeup_time = meta['rtc_hour'] * 60 + meta['rtc_minute']
            # rtc wake up requires UTC epoch, but WebUI users think in
            # localtime: build the wake time, bump it a day if needed,
            # then take its epoch.
            wake = datetime.now().replace(hour=int(meta['rtc_hour']),
                                          minute=int(meta['rtc_minute']),
                                          second=0,
                                          microsecond=0)
            # A wake up earlier than the crontab time runs the next day.
            if crontab_time > wakeup_time:
                wake += timedelta(days=1)
            url = ('%s/%s' % (url, wake.strftime('%s')))

        aw.api_call(url, data=None, calltype='post', save_error=False)
        logger.debug('System %s scheduled' % tdo.task_type)
        task.state = 'finished'

    except Exception as e:
        task.state = 'failed'
        logger.error('Failed to schedule system %s' % tdo.task_type)
        logger.exception(e)

    finally:
        # Persist whatever state the attempt ended in.
        task.save()
class SysinfoNamespace(RockstorIO):
    """Socket.io namespace streaming system information (uptime, local
    time, kernel info, update/shutdown status) to connected clients and
    triggering periodic state refreshes through the local REST API."""

    # Gates every while-loop below: set on connect, cleared on disconnect
    # so the spawned greenlets terminate.
    start = False
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION

    # This function is run once on every connection
    def on_connect(self, sid, environ):
        """Acknowledge the client and spawn one greenlet per reporter."""

        self.aw = APIWrapper()
        self.emit('connected',
                  {
                      'key': 'sysinfo:connected',
                      'data': 'connected'
                  })
        self.start = True
        self.spawn(self.update_storage_state, sid)
        self.spawn(self.update_check, sid)
        self.spawn(self.yum_updates, sid)
        self.spawn(self.update_rockons, sid)
        self.spawn(self.send_kernel_info, sid)
        self.spawn(self.prune_logs, sid)
        self.spawn(self.send_localtime, sid)
        self.spawn(self.send_uptime, sid)
        self.spawn(self.shutdown_status, sid)

    # Run on every disconnect
    def on_disconnect(self, sid):
        """Stop the reporter loops and release per-connection resources."""

        self.cleanup(sid)
        self.start = False

    def send_uptime(self):
        """Emit system uptime once a minute while connected."""
        # Seems redundant
        while self.start:
            self.emit('uptime', {'key': 'sysinfo:uptime', 'data': uptime()})
            gevent.sleep(60)

    def send_localtime(self):
        """Emit the server's local time every 40 seconds while connected."""

        while self.start:

            self.emit('localtime',
                      {
                          'key': 'sysinfo:localtime',
                          'data': time.strftime('%H:%M (%z %Z)')
                      })
            gevent.sleep(40)

    def send_kernel_info(self):
            # NOTE(review): this body is over-indented relative to the other
            # methods (still valid Python); presumably kernel_info() raises
            # when the running kernel is unsupported — confirm.

            try:
                self.emit('kernel_info',
                          {
                              'key': 'sysinfo:kernel_info',
                              'data': kernel_info(self.supported_kernel)
                          })
            except Exception as e:
                logger.error('Exception while gathering kernel info: %s' %
                             e.__str__())
                # Emit an event to the front end to capture error report
                self.emit('kernel_error', {
                    'key': 'sysinfo:kernel_error', 'data': str(e)})
                self.error('unsupported_kernel', str(e))

    def update_rockons(self):
        """One-shot refresh of Rock-on metadata via the API (best effort)."""

        try:
            self.aw.api_call('rockons/update', data=None, calltype='post',
                             save_error=False)
        except Exception as e:
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())

    def update_storage_state(self):
        """Refresh disk/pool/share/snapshot state once a minute; each
        endpoint failure is logged independently."""
        # update storage state once a minute as long as
        # there is a client connected.
        while self.start:
            resources = [{'url': 'disks/scan',
                          'success': 'Disk state updated successfully',
                          'error': 'Failed to update disk state.'},
                         {'url': 'commands/refresh-pool-state',
                          'success': 'Pool state updated successfully',
                          'error': 'Failed to update pool state.'},
                         {'url': 'commands/refresh-share-state',
                          'success': 'Share state updated successfully',
                          'error': 'Failed to update share state.'},
                         {'url': 'commands/refresh-snapshot-state',
                          'success': 'Snapshot state updated successfully',
                          'error': 'Failed to update snapshot state.'}, ]
            for r in resources:
                try:
                    self.aw.api_call(r['url'], data=None, calltype='post',
                                     save_error=False)
                except Exception as e:
                    logger.error('%s. exception: %s'
                                 % (r['error'], e.__str__()))
            gevent.sleep(60)

    def update_check(self):
        """One-shot software-update availability check."""

        uinfo = update_check()
        self.emit('software_update',
                  {
                      'key': 'sysinfo:software_update',
                      'data': uinfo
                  })

    def yum_updates(self):
        """Poll yum for pending package updates every 30 minutes."""

        while self.start:
            # rc == 100 is yum's convention for "updates available".
            rc, packages = yum_check()
            data = {}
            data['yum_updates'] = True if rc == 100 else False
            data['packages'] = packages
            self.emit('yum_updates',
                      {
                          'key': 'sysinfo:yum_updates',
                          'data': data
                      })
            gevent.sleep(1800)

    def on_runyum(self, sid):
        """Client-triggered system update via the API, run in a greenlet."""

        def launch_yum():

            try:
                data = {'yum_updating': False,
                        'yum_updates': False
                        }
                self.aw.api_call('commands/update', data=None,
                                 calltype='post', save_error=False)
                self.emit('yum_updates',
                          {
                              'key': 'sysinfo:yum_updates',
                              'data': data
                          })
            except Exception as e:
                logger.error('Unable to perform Yum Updates: %s'
                             % e.__str__())
        self.spawn(launch_yum, sid)

    def prune_logs(self):
        """Trigger task-log pruning once an hour while connected."""

        while self.start:
            self.aw.api_call('sm/tasks/log/prune', data=None, calltype='post',
                             save_error=False)
            gevent.sleep(3600)

    def shutdown_status(self):
        """Poll systemd-shutdownd every 30s and relay any pending
        shutdown message to the client."""

        while self.start:
            data = {}
            output, error, return_code = service_status('systemd-shutdownd')
            data['status'] = return_code
            if (return_code == 0):
                for row in output:
                    if (re.search('Status', row) is not None):
                        data['message'] = row.split(':', 1)[1]

            self.emit('shutdown_status',
                      {
                          'key': 'sysinfo:shutdown_status',
                          'data': data
                      })

            gevent.sleep(30)
Beispiel #34
0
class SysinfoNamespace(RockstorIO):
    """Socket.io namespace streaming system information (uptime, local
    time, kernel info, update status) to connected clients and triggering
    periodic state refreshes through the local REST API."""

    # Gates every while-loop below: set on connect, cleared on disconnect
    # so the spawned greenlets terminate.
    start = False
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION

    # This function is run once on every connection
    def on_connect(self, sid, environ):
        """Acknowledge the client and spawn one greenlet per reporter."""
        self.aw = APIWrapper()
        self.emit('connected', {
            'key': 'sysinfo:connected',
            'data': 'connected'
        })
        self.start = True
        self.spawn(self.update_storage_state, sid)
        self.spawn(self.update_check, sid)
        self.spawn(self.update_rockons, sid)
        self.spawn(self.send_kernel_info, sid)
        self.spawn(self.prune_logs, sid)
        self.spawn(self.send_localtime, sid)
        self.spawn(self.send_uptime, sid)

    # Run on every disconnect
    def on_disconnect(self, sid):
        """Stop the reporter loops and release per-connection resources."""
        self.cleanup(sid)
        self.start = False

    def send_uptime(self):
        """Emit system uptime once a minute while connected."""
        # Seems redundant
        while self.start:
            self.emit('uptime', {'key': 'sysinfo:uptime', 'data': uptime()})
            gevent.sleep(60)

    def send_localtime(self):
        """Emit the server's local time every 40 seconds while connected."""
        while self.start:

            self.emit('localtime', {
                'key': 'sysinfo:localtime',
                'data': time.strftime('%H:%M (%z %Z)')
            })
            gevent.sleep(40)

    def send_kernel_info(self):
        """One-shot kernel info report; on failure notify the client."""
        try:
            self.emit(
                'kernel_info', {
                    'key': 'sysinfo:kernel_info',
                    'data': kernel_info(self.supported_kernel)
                })
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s' %
                         e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('kernel_error', {
                'key': 'sysinfo:kernel_error',
                'data': str(e)
            })
            self.error('unsupported_kernel', str(e))

    def update_rockons(self):
        """One-shot refresh of Rock-on metadata via the API (best effort)."""
        try:
            self.aw.api_call('rockons/update',
                             data=None,
                             calltype='post',
                             save_error=False)
        # Bug fix: 'except Exception, e' is Python-2-only syntax (a
        # SyntaxError on Python 3); 'as' works on 2.6+ and 3.
        except Exception as e:
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())
Beispiel #35
0
class PincardManagerNamespace(RockstorIO):
    """Socket.io namespace driving pincard creation and pincard-based
    password resets. Per-session reset state (candidate user, expected
    pins, OTP, start time) lives on the instance between events."""

    def on_connect(self, sid, environ):
        """Prepare the API wrapper and greet the client."""

        self.aw = APIWrapper()
        self.emit(
            'pincardwelcome', {
                'key': 'pincardManager:pincardwelcome',
                'data': 'Welcome to Rockstor PincardManager'
            })

    def on_disconnect(self, sid):
        """Drop all per-session password-reset state before cleanup."""

        self.pins_user_uname = None
        self.pins_user_uid = None
        self.pins_check = None
        self.pass_reset_time = None
        self.otp = 'none'
        self.cleanup(sid)

    def on_generatepincard(self, sid, uid):
        """Create a pincard for uid in a greenlet and send it back."""

        def create_pincard(uid):

            try:
                url = 'pincardmanager/create/%s' % uid
                new_pincard = self.aw.api_call(url,
                                               data=None,
                                               calltype='post',
                                               save_error=False)
                self.emit('newpincard', {
                    'key': 'pincardManager:newpincard',
                    'data': new_pincard
                })
            except Exception as e:
                logger.error('Failed to create Pincard with '
                             'exception: %s' % e.__str__())

        self.spawn(create_pincard, sid, uid)

    def on_haspincard(self, sid, user):
        """Check whether *user* has a pincard and, if so, arm the reset:
        pick the pins to verify, start the 3-minute timer, and for root
        also generate an OTP."""

        def check_has_pincard(user):

            pins = []
            otp = False
            self.pins_check = []
            self.otp = 'none'
            # Convert from username to uid and if user exist check for
            # pincardManager We don't tell to frontend if a user exists or not
            # to avoid exposure to security flaws/brute forcing etc
            uid = username_to_uid(user)
            user_exist = True if uid is not None else False
            user_has_pincard = False
            # If user exists we check if has a pincard
            if user_exist:
                user_has_pincard = has_pincard(uid)
            # If user is root / uid 0 we check also if email notifications are
            # enabled If not user won't be able to reset password with pincard
            if uid == 0:
                user_has_pincard = user_has_pincard and email_notification_enabled(
                )  # noqa E501

            if user_has_pincard:
                # Arm the per-session reset state used by on_passreset.
                self.pins_user_uname = user
                self.pins_user_uid = uid
                pins = reset_random_pins(uid)
                for pin in pins:
                    self.pins_check.append(pin['pin_number'])

                # Set current time, user will have max 3 min to reset password
                self.pass_reset_time = datetime.now()

                if uid == 0:
                    self.otp = generate_otp(user)
                    otp = True

            self.emit(
                'haspincard', {
                    'key': 'pincardManager:haspincard',
                    'has_pincard': user_has_pincard,
                    'pins_check': pins,
                    'otp': otp
                })

        self.spawn(check_has_pincard, sid, user)

    def on_passreset(self, sid, pinlist, otp='none'):
        """Attempt the password reset: verify OTP, the 3-minute window
        and the pin set armed by on_haspincard, then call the backend."""

        def password_reset(pinlist, otp):

            reset_status = False
            reset_response = None

            # On pass reset first we check for otp If not required none = none,
            # otherwhise sent val has to match stored one
            if otp == self.otp:

                # If otp is ok we check for elapsed time to be < 3 mins
                elapsed_time = (
                    datetime.now() -
                    self.pass_reset_time).total_seconds()  # noqa E501
                if elapsed_time < 180:

                    # If received pins equal expected pins, check for values
                    # via reset_password func
                    if all(int(key) in self.pins_check for key in pinlist):
                        data = {'uid': self.pins_user_uid, 'pinlist': pinlist}
                        url = 'pincardmanager/reset/%s' % self.pins_user_uname
                        headers = {'content-type': 'application/json'}
                        reset_data = self.aw.api_call(url,
                                                      data=data,
                                                      calltype='post',
                                                      headers=headers,
                                                      save_error=False)
                        reset_response = reset_data['response']
                        reset_status = reset_data['status']
                    else:
                        reset_response = ('Received pins set differs from '
                                          'expected one. Password reset '
                                          'denied')
                else:
                    reset_response = ('Pincard 3 minutes reset time has '
                                      'expired. Password reset denied')
            else:
                reset_response = ('Sent OTP doesn\'t match. Password reset '
                                  'denied')

            self.emit(
                'passresetresponse', {
                    'key': 'pincardManager:passresetresponse',
                    'response': reset_response,
                    'status': reset_status
                })

        self.spawn(password_reset, sid, pinlist, otp)
Beispiel #36
0
class SysinfoNamespace(RockstorIO):
    """Socket.io namespace that streams live system information to the WebUI.

    On connect, one greenlet per metric is spawned via ``self.spawn()``; each
    loops for as long as ``start`` is True, emitting its payload and then
    sleeping for its own interval.
    NOTE(review): emit/spawn/cleanup/error are assumed (from usage here) to be
    provided by the RockstorIO base class -- confirm against that class.
    """

    # Polled by every emitter loop: set on connect, cleared on disconnect.
    start = False
    # Kernel version this build supports, taken from Django settings.
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION

    # This function is run once on every connection
    def on_connect(self, sid, environ):
        """Acknowledge the client, then start all periodic emitters."""

        self.aw = APIWrapper()
        self.emit('connected', {
            'key': 'sysinfo:connected',
            'data': 'connected'
        })
        self.start = True
        self.spawn(self.update_storage_state, sid)
        self.spawn(self.update_check, sid)
        self.spawn(self.yum_updates, sid)
        self.spawn(self.update_rockons, sid)
        self.spawn(self.send_kernel_info, sid)
        self.spawn(self.prune_logs, sid)
        self.spawn(self.send_localtime, sid)
        self.spawn(self.send_uptime, sid)
        self.spawn(self.shutdown_status, sid)
        self.spawn(self.pool_degraded_status, sid)

    # Run on every disconnect
    def on_disconnect(self, sid):
        """Stop all emitter loops and release per-connection resources."""

        self.cleanup(sid)
        self.start = False

    def send_uptime(self):
        """Emit system uptime once a minute while a client is connected."""
        # Seems redundant
        while self.start:
            self.emit('uptime', {'key': 'sysinfo:uptime', 'data': uptime()})
            gevent.sleep(60)

    def send_localtime(self):
        """Emit the server's local wall-clock time every 40 seconds."""

        while self.start:

            self.emit('localtime', {
                'key': 'sysinfo:localtime',
                'data': time.strftime('%H:%M (%z %Z)')
            })
            gevent.sleep(40)

    def send_kernel_info(self):
        """Emit running-kernel info once; emit an error event instead if the
        running kernel is not the supported one."""

        try:
            self.emit(
                'kernel_info', {
                    'key': 'sysinfo:kernel_info',
                    'data': kernel_info(self.supported_kernel)
                })
            # kernel_info() in above raises an Exception if the running
            # kernel != supported kernel and so:
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s' %
                         e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('kernel_error', {
                'key': 'sysinfo:kernel_error',
                'data': str(e)
            })
            self.error('unsupported_kernel', str(e))

    def update_rockons(self):
        """Trigger a one-off server-side refresh of Rock-on metadata."""

        try:
            self.aw.api_call('rockons/update',
                             data=None,
                             calltype='post',
                             save_error=False)
        except Exception as e:
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())

    def update_storage_state(self):
        """Refresh disk/pool/share/snapshot state via the API every minute."""
        # update storage state once a minute as long as
        # there is a client connected.
        while self.start:
            resources = [
                {
                    'url': 'disks/scan',
                    'success': 'Disk state updated successfully',
                    'error': 'Failed to update disk state.'
                },
                {
                    'url': 'commands/refresh-pool-state',
                    'success': 'Pool state updated successfully',
                    'error': 'Failed to update pool state.'
                },
                {
                    'url': 'commands/refresh-share-state',
                    'success': 'Share state updated successfully',
                    'error': 'Failed to update share state.'
                },
                {
                    'url': 'commands/refresh-snapshot-state',
                    'success': 'Snapshot state updated successfully',
                    'error': 'Failed to update snapshot state.'
                },
            ]
            for r in resources:
                try:
                    self.aw.api_call(r['url'],
                                     data=None,
                                     calltype='post',
                                     save_error=False)
                except Exception as e:
                    # Best effort: log and continue with remaining resources.
                    logger.error('%s. exception: %s' %
                                 (r['error'], e.__str__()))
            gevent.sleep(60)

    def update_check(self):
        """Emit available-software-update info once per connection."""

        # Calls the module-level update_check() helper, not this method.
        uinfo = update_check()
        self.emit('software_update', {
            'key': 'sysinfo:software_update',
            'data': uinfo
        })

    def yum_updates(self):
        """Poll for pending yum package updates every 30 minutes."""

        while self.start:
            rc, packages = yum_check()
            data = {}
            # yum check-update convention: return code 100 means
            # updates are available.
            data['yum_updates'] = True if rc == 100 else False
            data['packages'] = packages
            self.emit('yum_updates', {
                'key': 'sysinfo:yum_updates',
                'data': data
            })
            gevent.sleep(1800)

    def on_runyum(self, sid):
        """Client request handler: run a system update in the background."""
        def launch_yum():
            # One-shot worker: run the update, then clear the WebUI flags.

            try:
                data = {'yum_updating': False, 'yum_updates': False}
                self.aw.api_call('commands/update',
                                 data=None,
                                 calltype='post',
                                 save_error=False)
                self.emit('yum_updates', {
                    'key': 'sysinfo:yum_updates',
                    'data': data
                })
            except Exception as e:
                logger.error('Unable to perform Yum Updates: %s' % e.__str__())

        self.spawn(launch_yum, sid)

    def prune_logs(self):
        """Ask the API to prune scheduled-task logs once an hour."""

        while self.start:
            self.aw.api_call('sm/tasks/log/prune',
                             data=None,
                             calltype='post',
                             save_error=False)
            gevent.sleep(3600)

    def shutdown_status(self):
        """Emit pending-shutdown status (via systemd-shutdownd) every 30s."""

        while self.start:
            data = {}
            output, error, return_code = service_status('systemd-shutdownd')
            data['status'] = return_code
            if (return_code == 0):
                # Service active: forward the human-readable 'Status:' line.
                for row in output:
                    if (re.search('Status', row) is not None):
                        data['message'] = row.split(':', 1)[1]

            self.emit('shutdown_status', {
                'key': 'sysinfo:shutdown_status',
                'data': data
            })

            gevent.sleep(30)

    def pool_degraded_status(self):
        """Emit degraded-pool status every 30 seconds.

        Any degraded count left over after checking managed pools is
        reported as '(N) unimported' pools.
        """

        # Examples of data.message:
        # "Pools found degraded: (2) unimported"
        # "Pools found degraded: (rock-pool)"
        # "Pools found degraded: (rock-pool, rock-pool-3)"
        # "Pools found degraded: (rock-pool, rock-pool-3), plus (1) unimported"
        while self.start:
            data = {'status': 'OK'}
            deg_pools_count = degraded_pools_found()
            if deg_pools_count > 0:
                data['status'] = 'degraded'
                data['message'] = 'Pools found degraded: '
                labels = []
                for p in Pool.objects.all():
                    if p.has_missing_dev:
                        deg_pools_count -= 1
                        labels.append(p.name)
                if labels != []:
                    data['message'] += '({})'.format(', '.join(labels))
                if deg_pools_count > 0:
                    # we have degraded un-managed pools, add this info
                    if labels != []:
                        data['message'] += ', plus '
                    data['message'] += '({}) ' \
                                       'unimported'.format(deg_pools_count)

            self.emit('pool_degraded_status', {
                'key': 'sysinfo:pool_degraded_status',
                'data': data
            })

            gevent.sleep(30)
Beispiel #37
0
def main():
    """Schedule a system reboot, shutdown or suspend from a cron task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-style
    time/day window (defaults to always). For shutdown/suspend tasks with an
    RTC wake-up configured, the wake-up epoch is appended to the command URL.
    """
    # Local import: only needed for the portable epoch conversion below.
    import time

    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        aw = APIWrapper()
        if (tdo.task_type not in ['reboot', 'shutdown', 'suspend']):
            logger.error('task_type(%s) is not a system reboot, '
                         'shutdown or suspend.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_shutdown_meta(meta)

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        schedule = now + timedelta(minutes=3)
        t = Task(task_def=tdo, state='scheduled', start=now, end=schedule)

        try:
            # set default command url before checking if it's a shutdown
            # and if we have an rtc wake up
            url = ('commands/%s' % tdo.task_type)

            # if task_type is shutdown and rtc wake up true
            # parse crontab hour & minute vs rtc hour & minute to state
            # if wake will occur same day or next day, finally update
            # command url adding wake up epoch time
            if (tdo.task_type in ['shutdown', 'suspend'] and meta['wakeup']):
                crontab_fields = tdo.crontab.split()
                crontab_time = (int(crontab_fields[1]) * 60 +
                                int(crontab_fields[0]))
                wakeup_time = meta['rtc_hour'] * 60 + meta['rtc_minute']
                # rtc wake up requires UTC epoch, but users on WebUI set time
                # thinking to localtime, so first we set wake up time,
                # update it if wake up is on next day, finally move it to UTC
                # and get its epoch
                epoch = datetime.now().replace(hour=int(meta['rtc_hour']),
                                               minute=int(meta['rtc_minute']),
                                               second=0, microsecond=0)
                # if wake up < crontab time wake up will run next day
                if (crontab_time > wakeup_time):
                    epoch += timedelta(days=1)

                # FIX: strftime('%s') is a glibc-only extension, not one of
                # Python's documented format codes; time.mktime() yields the
                # same local-time epoch on every platform.
                epoch = str(int(time.mktime(epoch.timetuple())))
                url = ('%s/%s' % (url, epoch))

            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('System %s scheduled' % tdo.task_type)
            t.state = 'finished'

        except Exception as e:
            t.state = 'failed'
            logger.error('Failed to schedule system %s' % tdo.task_type)
            logger.exception(e)

        finally:
            # t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
Beispiel #38
0
# Absolute path to the docker binary used for all container operations.
DOCKER = '/usr/bin/docker'
# Local Rockstor REST endpoint for rock-on management.
ROCKON_URL = 'https://localhost/api/rockons'
# Base 'docker run' invocation; container output goes to syslog.
DCMD = [
    DOCKER,
    'run',
    '--log-driver=syslog',
]
# As DCMD, but detached and auto-restarting (for long-lived rock-ons).
DCMD2 = list(DCMD) + [
    '-d',
    '--restart=unless-stopped',
]

import logging
logger = logging.getLogger(__name__)
# Shared client for calls back into the Rockstor REST API.
aw = APIWrapper()


def docker_status():
    """Return True if the docker service is running, else False.

    A zero return code from the project's service_status() helper is
    treated as 'running' (systemctl convention).
    """
    _out, _err, rc = service_status('docker')
    # Idiom: return the boolean directly instead of if/return True/False.
    return rc == 0


def rockon_status(name):
    """Return the status of the Rock-on identified by *name*.

    If a custom '<lowercased name>_status' function exists at module level it
    is delegated to; otherwise the Rock-on's highest-launch-order container
    is inspected.
    NOTE(review): the visible body ends right after fetching that container
    ('state' and 'co' are unused here) -- the remainder of this function
    appears truncated in this excerpt; confirm against the full module.
    """
    ro = RockOn.objects.get(name=name)
    # Dynamic dispatch to an optional per-rock-on status helper.
    if (globals().get('%s_status' % ro.name.lower()) is not None):
        return globals().get('%s_status' % ro.name.lower())(ro)
    state = 'unknown error'
    co = DContainer.objects.filter(rockon=ro).order_by('-launch_order')[0]
Beispiel #39
0
class SysinfoNamespace(RockstorIO):
    """Socket.io namespace that streams live system information to the WebUI.

    On connect, one greenlet per metric is spawned via ``self.spawn()``; each
    loops for as long as ``start`` is True, emitting its payload and then
    sleeping for its own interval.
    NOTE(review): emit/spawn/cleanup/error are assumed (from usage here) to be
    provided by the RockstorIO base class -- confirm against that class.
    FIX: send_kernel_info's body was indented one extra level relative to
    every sibling method; dedented to the standard method-body level.
    """

    # Polled by every emitter loop: set on connect, cleared on disconnect.
    start = False
    # Kernel version this build supports, taken from Django settings.
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION
    # Distribution name, taken from Django settings.
    os_distro_name = settings.OS_DISTRO_NAME

    # This function is run once on every connection
    def on_connect(self, sid, environ):
        """Acknowledge the client, then start all periodic emitters."""

        self.aw = APIWrapper()
        self.emit('connected',
                  {
                      'key': 'sysinfo:connected',
                      'data': 'connected'
                  })
        self.start = True
        self.spawn(self.update_storage_state, sid)
        self.spawn(self.update_check, sid)
        self.spawn(self.yum_updates, sid)
        self.spawn(self.update_rockons, sid)
        self.spawn(self.send_kernel_info, sid)
        self.spawn(self.prune_logs, sid)
        self.spawn(self.send_localtime, sid)
        self.spawn(self.send_uptime, sid)
        self.spawn(self.send_distroinfo, sid)
        self.spawn(self.shutdown_status, sid)
        self.spawn(self.pool_degraded_status, sid)
        self.spawn(self.pool_dev_stats, sid)

    # Run on every disconnect
    def on_disconnect(self, sid):
        """Stop all emitter loops and release per-connection resources."""

        self.cleanup(sid)
        self.start = False

    def send_uptime(self):
        """Emit system uptime once a minute while a client is connected."""

        while self.start:
            self.emit('uptime', {'key': 'sysinfo:uptime', 'data': uptime()})
            gevent.sleep(60)

    def send_distroinfo(self):
        """Emit distribution name and version every 10 minutes."""
        while self.start:
            data = {'distro': self.os_distro_name, 'version': distro.version()}
            self.emit('distro_info',
                      {'key': 'sysinfo:distro_info', 'data': data})
            gevent.sleep(600)

    def send_localtime(self):
        """Emit the server's local wall-clock time every 40 seconds."""

        while self.start:

            self.emit('localtime',
                      {
                          'key': 'sysinfo:localtime',
                          'data': time.strftime('%H:%M (%z %Z)')
                      })
            gevent.sleep(40)

    def send_kernel_info(self):
        """Emit running-kernel info once; emit an error event instead if the
        running kernel is not the supported one."""

        try:
            self.emit('kernel_info',
                      {
                          'key': 'sysinfo:kernel_info',
                          'data': kernel_info(self.supported_kernel)
                      })
            # kernel_info() in above raises an Exception if the running
            # kernel != supported kernel and so:
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s' %
                         e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('kernel_error', {
                'key': 'sysinfo:kernel_error', 'data': str(e)})
            self.error('unsupported_kernel', str(e))

    def update_rockons(self):
        """Trigger a one-off server-side refresh of Rock-on metadata."""

        try:
            self.aw.api_call('rockons/update', data=None, calltype='post',
                             save_error=False)
        except Exception as e:
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())

    def update_storage_state(self):
        """Refresh disk/pool/share/snapshot state via the API every minute."""
        # update storage state once a minute as long as
        # there is a client connected.
        while self.start:
            resources = [{'url': 'disks/scan',
                          'success': 'Disk state updated successfully',
                          'error': 'Failed to update disk state.'},
                         {'url': 'commands/refresh-pool-state',
                          'success': 'Pool state updated successfully',
                          'error': 'Failed to update pool state.'},
                         {'url': 'commands/refresh-share-state',
                          'success': 'Share state updated successfully',
                          'error': 'Failed to update share state.'},
                         {'url': 'commands/refresh-snapshot-state',
                          'success': 'Snapshot state updated successfully',
                          'error': 'Failed to update snapshot state.'}, ]
            for r in resources:
                try:
                    self.aw.api_call(r['url'], data=None, calltype='post',
                                     save_error=False)
                except Exception as e:
                    # Best effort: log and continue with remaining resources.
                    logger.error('%s. exception: %s'
                                 % (r['error'], e.__str__()))
            gevent.sleep(60)

    def update_check(self):
        """Emit available-software-update info once per connection."""

        # Calls the module-level update_check() helper, not this method.
        uinfo = update_check()
        self.emit('software_update',
                  {
                      'key': 'sysinfo:software_update',
                      'data': uinfo
                  })

    def yum_updates(self):
        """Poll for pending yum package updates every 30 minutes."""

        while self.start:
            rc, packages = yum_check()
            data = {}
            # yum check-update convention: return code 100 means
            # updates are available.
            data['yum_updates'] = True if rc == 100 else False
            data['packages'] = packages
            self.emit('yum_updates',
                      {
                          'key': 'sysinfo:yum_updates',
                          'data': data
                      })
            gevent.sleep(1800)

    def on_runyum(self, sid):
        """Client request handler: run a system update in the background."""

        def launch_yum():
            # One-shot worker: run the update, then clear the WebUI flags.

            try:
                data = {'yum_updating': False,
                        'yum_updates': False
                        }
                self.aw.api_call('commands/update', data=None,
                                 calltype='post', save_error=False)
                self.emit('yum_updates',
                          {
                              'key': 'sysinfo:yum_updates',
                              'data': data
                          })
            except Exception as e:
                logger.error('Unable to perform Yum Updates: %s'
                             % e.__str__())
        self.spawn(launch_yum, sid)

    def prune_logs(self):
        """Ask the API to prune scheduled-task logs once an hour."""

        while self.start:
            self.aw.api_call('sm/tasks/log/prune', data=None, calltype='post',
                             save_error=False)
            gevent.sleep(3600)

    def shutdown_status(self):
        """Emit pending-shutdown status (via systemd-shutdownd) every 30s."""

        while self.start:
            data = {}
            output, error, return_code = service_status('systemd-shutdownd')
            data['status'] = return_code
            if (return_code == 0):
                # Service active: forward the human-readable 'Status:' line.
                for row in output:
                    if (re.search('Status', row) is not None):
                        data['message'] = row.split(':', 1)[1]

            self.emit('shutdown_status',
                      {
                          'key': 'sysinfo:shutdown_status',
                          'data': data
                      })

            gevent.sleep(30)

    def pool_degraded_status(self):
        """Emit degraded-pool status every 30 seconds.

        Any degraded count left over after checking managed pools is
        reported as '(N) unimported' pools.
        """

        # Examples of data.message:
        # "Pools found degraded: (2) unimported"
        # "Pools found degraded: (rock-pool)"
        # "Pools found degraded: (rock-pool, rock-pool-3)"
        # "Pools found degraded: (rock-pool, rock-pool-3), plus (1) unimported"
        while self.start:
            data = {'status': 'OK'}
            deg_pools_count = degraded_pools_found()
            if deg_pools_count > 0:
                data['status'] = 'degraded'
                data['message'] = 'Pools found degraded: '
                labels = []
                for p in Pool.objects.all():
                    if p.has_missing_dev:
                        deg_pools_count -= 1
                        labels.append(p.name)
                if labels != []:
                    data['message'] += '({})'.format(', '.join(labels))
                if deg_pools_count > 0:
                    # we have degraded un-managed pools, add this info
                    if labels != []:
                        data['message'] += ', plus '
                    data['message'] += '({}) ' \
                                       'unimported'.format(deg_pools_count)

            self.emit('pool_degraded_status',
                      {
                          'key': 'sysinfo:pool_degraded_status',
                          'data': data
                      })

            gevent.sleep(30)

    def pool_dev_stats(self):
        """Emit pool device-error status every 30 seconds."""

        # Examples of data.message:
        # "Pools found with device errors: (rock-pool)"
        # "Pools found with device errors: (rock-pool, rock-pool-3)"
        # TODO: Consider blending into the existing pool_degraded_status()
        # TODO: to reduce overheads of looping through pools again.
        # TODO: The combined emitter could be called pool_health_status().
        while self.start:
            data = {'status': 'OK'}
            labels = []
            for p in Pool.objects.all():
                if not p.dev_stats_ok:
                    labels.append(p.name)
            if labels != []:
                data['status'] = 'errors'
                data['message'] = 'Pools found with device errors: '
                data['message'] += '({})'.format(', '.join(labels))

            self.emit('pool_dev_stats',
                      {
                          'key': 'sysinfo:pool_dev_stats',
                          'data': data
                      })

            gevent.sleep(30)
Beispiel #40
0
class SysinfoNamespace(RockstorIO):
    """Socket.io namespace that streams live system information to the WebUI.

    On connect, one greenlet per metric is spawned via ``self.spawn()``; each
    loops for as long as ``start`` is True, emitting its payload and then
    sleeping for its own interval.
    NOTE(review): emit/spawn/cleanup/error are assumed (from usage here) to be
    provided by the RockstorIO base class -- confirm against that class.
    """

    # Polled by every emitter loop: set on connect, cleared on disconnect.
    start = False
    # Kernel version this build supports, taken from Django settings.
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION

    # This function is run once on every connection
    def on_connect(self, sid, environ):
        """Acknowledge the client, then start all periodic emitters."""

        self.aw = APIWrapper()
        self.emit('connected', {
            'key': 'sysinfo:connected',
            'data': 'connected'
        })
        self.start = True
        self.spawn(self.update_storage_state, sid)
        self.spawn(self.update_check, sid)
        self.spawn(self.update_rockons, sid)
        self.spawn(self.send_kernel_info, sid)
        self.spawn(self.prune_logs, sid)
        self.spawn(self.send_localtime, sid)
        self.spawn(self.send_uptime, sid)

    # Run on every disconnect
    def on_disconnect(self, sid):
        """Stop all emitter loops and release per-connection resources."""

        self.cleanup(sid)
        self.start = False

    def send_uptime(self):
        """Emit system uptime once a minute while a client is connected."""
        # Seems redundant
        while self.start:
            self.emit('uptime', {'key': 'sysinfo:uptime', 'data': uptime()})
            gevent.sleep(60)

    def send_localtime(self):
        """Emit the server's local wall-clock time every 40 seconds."""

        while self.start:

            self.emit('localtime', {
                'key': 'sysinfo:localtime',
                'data': time.strftime('%H:%M (%z %Z)')
            })
            gevent.sleep(40)

    def send_kernel_info(self):
        """Emit running-kernel info once; emit an error event instead if
        kernel_info() raises."""

        try:
            self.emit(
                'kernel_info', {
                    'key': 'sysinfo:kernel_info',
                    'data': kernel_info(self.supported_kernel)
                })
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s' %
                         e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('kernel_error', {
                'key': 'sysinfo:kernel_error',
                'data': str(e)
            })
            self.error('unsupported_kernel', str(e))

    def update_rockons(self):
        """Trigger a one-off server-side refresh of Rock-on metadata."""

        try:
            self.aw.api_call('rockons/update',
                             data=None,
                             calltype='post',
                             save_error=False)
        except Exception as e:
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())

    def update_storage_state(self):
        """Refresh disk/pool/share/snapshot state via the API every minute."""
        # update storage state once a minute as long as
        # there is a client connected.
        while self.start:
            resources = [
                {
                    'url': 'disks/scan',
                    'success': 'Disk state updated successfully',
                    'error': 'Failed to update disk state.'
                },
                {
                    'url': 'commands/refresh-pool-state',
                    'success': 'Pool state updated successfully',
                    'error': 'Failed to update pool state.'
                },
                {
                    'url': 'commands/refresh-share-state',
                    'success': 'Share state updated successfully',
                    'error': 'Failed to update share state.'
                },
                {
                    'url': 'commands/refresh-snapshot-state',
                    'success': 'Snapshot state updated successfully',
                    'error': 'Failed to update snapshot state.'
                },
            ]
            for r in resources:
                try:
                    self.aw.api_call(r['url'],
                                     data=None,
                                     calltype='post',
                                     save_error=False)
                except Exception as e:
                    # Best effort: log and continue with remaining resources.
                    logger.error('%s. exception: %s' %
                                 (r['error'], e.__str__()))
            gevent.sleep(60)

    def update_check(self):
        """Emit available-software-update info once per connection."""

        # Calls the module-level update_check() helper, not this method.
        uinfo = update_check()
        self.emit('software_update', {
            'key': 'sysinfo:software_update',
            'data': uinfo
        })

    def prune_logs(self):
        """Ask the API to prune scheduled-task logs once an hour."""

        while self.start:
            self.aw.api_call('sm/tasks/log/prune',
                             data=None,
                             calltype='post',
                             save_error=False)
            gevent.sleep(3600)
Beispiel #41
0
class SysinfoNamespace(BaseNamespace, BroadcastMixin):
    """Legacy socket.io namespace streaming system info to the WebUI.

    Spawns raw gevent greenlets on connect; each loops while ``start`` is
    True. FIX: update_rockons used the Python-2-only 'except Exception, e:'
    syntax (a SyntaxError under Python 3, and inconsistent with the
    'except ... as e' form used everywhere else in this file).
    NOTE(review): refresh_system calls self.update_storage_state() and
    self.update_check(), which are not defined here -- presumably supplied
    elsewhere (base class or full module); verify.
    """

    # Polled by the emitter loops: set on connect, cleared on disconnect.
    start = False
    # Kernel version this build supports, taken from Django settings.
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION

    # Called before the connection is established
    def initialize(self):
        self.aw = APIWrapper()

    # This function is run once on every connection
    def recv_connect(self):
        """Acknowledge the client and start the background emitters."""
        self.emit("sysinfo:sysinfo", {
            "key": "sysinfo:connected",
            "data": "connected"
        })
        self.start = True
        gevent.spawn(self.refresh_system)
        gevent.spawn(self.send_uptime)
        gevent.spawn(self.send_kernel_info)

    # Run on every disconnect
    def recv_disconnect(self):
        """Stop the emitter loops and close the socket."""
        self.start = False
        self.disconnect()

    def send_uptime(self):
        """Emit system uptime every 30 seconds while connected."""
        # Seems redundant
        while self.start:
            self.emit('sysinfo:uptime', {
                'data': uptime(),
                'key': 'sysinfo:uptime'
            })
            gevent.sleep(30)

    def send_kernel_info(self):
        """Emit running-kernel info once; emit an error event instead if
        kernel_info() raises."""
        try:
            self.emit(
                'sysinfo:kernel_info', {
                    'data': kernel_info(self.supported_kernel),
                    'key': 'sysinfo:kernel_info'
                })
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s' %
                         e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('sysinfo:kernel_error', {
                'data': str(e),
                'key': 'sysinfo:kernel_error'
            })
            self.error('unsupported_kernel', str(e))

    def refresh_system(self):
        """Refresh storage/rock-on/update state, rate-limited via the
        scan_ts/scan_interval values carried in self.environ."""
        cur_ts = datetime.utcnow()
        if ((cur_ts - self.environ['scan_ts']).total_seconds() <
                self.environ['scan_interval']):
            logger.debug('Skipping system state refresh as it was done less '
                         'than %d seconds ago.' %
                         self.environ['scan_interval'])
            return
        self.update_storage_state()
        self.update_rockons()
        self.update_check()

    def update_rockons(self):
        """Trigger a one-off server-side refresh of Rock-on metadata."""
        try:
            self.aw.api_call('rockons/update',
                             data=None,
                             calltype='post',
                             save_error=False)
        # FIX: was 'except Exception, e:' (Python 2 only).
        except Exception as e:
            logger.error('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())
Beispiel #42
0
def main():
    """Create a scheduled snapshot for a share and prune old ones.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-style
    time/day window (defaults to always). Snapshot count is capped at
    max_count via delete() both before and after creation.
    """
    task_id = int(sys.argv[1])
    cron_window = '*-*-*-*-*-*'
    if len(sys.argv) > 2:
        cron_window = sys.argv[2]
    # Performance note: immediately check task execution time/day window
    # range to avoid other calls
    if not crontabwindow.crontab_range(cron_window):
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
        return

    task_def = TaskDefinition.objects.get(id=task_id)
    snap_type = 'task_scheduler'
    api = APIWrapper()
    if task_def.task_type != 'snapshot':
        logger.error('task_type(%s) is not snapshot.' % task_def.task_type)
        return
    meta = json.loads(task_def.json_meta)
    validate_snap_meta(meta)

    # to keep backwards compatibility, allow for share to be either
    # name or id and migrate the metadata. To be removed in #1854
    try:
        share = Share.objects.get(id=meta['share'])
    except ValueError:
        # meta['share'] held a legacy name, not an id: look up by name
        # and rewrite the stored metadata to use the id.
        share = Share.objects.get(name=meta['share'])
        meta['share'] = share.id
        task_def.json_meta = json.dumps(meta)
        task_def.save()

    max_count = int(float(meta['max_count']))
    prefix = '%s_' % meta['prefix']

    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    task = Task(task_def=task_def, state='started', start=now)

    snap_created = False
    # Pessimistic default: flipped to 'finished' only on success.
    task.state = 'error'
    try:
        timestamp = datetime.now().strftime(settings.SNAP_TS_FORMAT)
        name = '%s_%s' % (meta['prefix'], timestamp)
        url = 'shares/{}/snapshots/{}'.format(share.id, name)
        # only create a new snap if there's no overflow situation. This
        # prevents runaway snapshot creation beyond max_count+1.
        if delete(api, share, snap_type, prefix, max_count):
            data = {
                'snap_type': snap_type,
                'uvisible': meta['visible'],
                'writable': meta['writable'],
            }
            headers = {'content-type': 'application/json'}
            api.api_call(url, data=data, calltype='post', headers=headers,
                         save_error=False)
            logger.debug('created snapshot at %s' % url)
            task.state = 'finished'
            snap_created = True
    except Exception as e:
        logger.error('Failed to create snapshot at %s' % url)
        logger.exception(e)
    finally:
        task.end = datetime.utcnow().replace(tzinfo=utc)
        task.save()

    # best effort pruning without erroring out. If deletion fails, we'll
    # have max_count+1 number of snapshots and it would be dealt with on
    # the next round.
    if snap_created:
        delete(api, share, snap_type, prefix, max_count)
Beispiel #43
0
class PincardManagerNamespace(RockstorIO):
    """Socket.io namespace managing Pincard creation and pincard-based
    password resets.

    Per-session reset state (target user, expected pins, reset deadline,
    OTP) is kept on the instance and cleared on disconnect.
    """

    def on_connect(self, sid, environ):
        """Greet the client and set up the REST API wrapper."""
        self.aw = APIWrapper()
        self.emit(
            "pincardwelcome",
            {
                "key": "pincardManager:pincardwelcome",
                "data": "Welcome to Rockstor PincardManager",
            },
        )

    def on_disconnect(self, sid):
        """Clear all per-session reset state so nothing leaks across
        connections, then run the namespace cleanup."""
        self.pins_user_uname = None
        self.pins_user_uid = None
        self.pins_check = None
        self.pass_reset_time = None
        self.otp = "none"
        self.cleanup(sid)

    def on_generatepincard(self, sid, uid):
        """Spawn a greenlet that creates a new pincard for *uid* via the
        API and emits it back to the requesting client."""
        def create_pincard(uid):

            try:
                url = "pincardmanager/create/%s" % uid
                new_pincard = self.aw.api_call(url,
                                               data=None,
                                               calltype="post",
                                               save_error=False)
                self.emit(
                    "newpincard",
                    {
                        "key": "pincardManager:newpincard",
                        "data": new_pincard
                    },
                )
            except Exception as e:
                logger.error("Failed to create Pincard with exception: %s" %
                             e.__str__())

        self.spawn(create_pincard, sid, uid)

    def on_haspincard(self, sid, user):
        """Spawn a greenlet that checks whether *user* has a usable pincard
        and, if so, primes this session for a password reset."""
        def check_has_pincard(user):

            pins = []
            otp = False
            self.pins_check = []
            self.otp = "none"
            # Convert from username to uid and if user exist check for
            # pincardManager We don't tell to frontend if a user exists or not
            # to avoid exposure to security flaws/brute forcing etc
            uid = username_to_uid(user)
            user_exist = uid is not None
            user_has_pincard = False
            # If user exists we check if has a pincard
            if user_exist:
                user_has_pincard = has_pincard(uid)
            # If user is root / uid 0 we check also if email notifications are
            # enabled If not user won't be able to reset password with pincard
            if uid == 0:
                user_has_pincard = (user_has_pincard
                                    and email_notification_enabled()
                                    )  # noqa E501

            if user_has_pincard:
                self.pins_user_uname = user
                self.pins_user_uid = uid
                pins = reset_random_pins(uid)
                for pin in pins:
                    self.pins_check.append(pin["pin_number"])

                # Set current time, user will have max 3 min to reset password
                self.pass_reset_time = datetime.now()

                if uid == 0:
                    self.otp = generate_otp(user)
                    otp = True

            self.emit(
                "haspincard",
                {
                    "key": "pincardManager:haspincard",
                    "has_pincard": user_has_pincard,
                    "pins_check": pins,
                    "otp": otp,
                },
            )

        self.spawn(check_has_pincard, sid, user)

    def on_passreset(self, sid, pinlist, otp="none"):
        """Spawn a greenlet that validates OTP, deadline and pins, then asks
        the API to reset the password and emits the outcome."""
        def password_reset(pinlist, otp):

            reset_status = False
            reset_response = None

            # On pass reset first we check for otp If not required none = none,
            # otherwhise sent val has to match stored one
            if otp == self.otp:

                # If otp is ok we check for elapsed time to be < 3 mins
                # NOTE(review): assumes on_haspincard ran first and set
                # pass_reset_time — confirm the frontend enforces that order.
                elapsed_time = (
                    datetime.now() -
                    self.pass_reset_time).total_seconds()  # noqa E501
                if elapsed_time < 180:

                    # If received pins equal expected pins, check for values
                    # via reset_password func
                    if all(int(key) in self.pins_check for key in pinlist):
                        data = {"uid": self.pins_user_uid, "pinlist": pinlist}
                        url = "pincardmanager/reset/%s" % self.pins_user_uname
                        headers = {"content-type": "application/json"}
                        reset_data = self.aw.api_call(
                            url,
                            data=data,
                            calltype="post",
                            headers=headers,
                            save_error=False,
                        )
                        reset_response = reset_data["response"]
                        reset_status = reset_data["status"]
                    else:
                        reset_response = ("Received pins set differs from "
                                          "expected one. Password reset "
                                          "denied")
                else:
                    reset_response = ("Pincard 3 minutes reset time has "
                                      "expired. Password reset denied")
            else:
                reset_response = "Sent OTP doesn't match. Password reset denied"

            self.emit(
                "passresetresponse",
                {
                    "key": "pincardManager:passresetresponse",
                    "response": reset_response,
                    "status": reset_status,
                },
            )

        self.spawn(password_reset, sid, pinlist, otp)
Beispiel #44
0

def main():
    """Run a btrfs device scan, then bootstrap Rockstor via its REST API.

    Bootstrap is retried with a 2 second pause between attempts because
    the rockstor.service may still be starting up; after 15 retries we
    give up and exit non-zero.
    """
    try:
        device_scan()
    except Exception as e:
        print('BTRFS device scan failed due to an exception. This indicates '
              'a serious problem. Aborting. Exception: %s' % e.__str__())
        sys.exit(1)
    print('BTRFS device scan complete')

    num_attempts = 0
    while True:
        try:
            aw = APIWrapper()
            aw.api_call('network')
            aw.api_call('commands/bootstrap', calltype='post')
            break
        except Exception as e:
            # Retry on every exception, primarily because of django-oauth
            # related code behaving unpredictably while setting tokens.
            # Retrying is a decent workaround for now(11302015).
            if (num_attempts > 15):
                print('Max attempts(15) reached. Connection errors persist. '
                      'Failed to bootstrap. Error: %s' % e.__str__())
                sys.exit(1)
            print('Exception occured while bootstrapping. This could be because '
                  'rockstor.service is still starting up. will wait 2 seconds '
                  'and try again. Exception: %s' % e.__str__())
            # BUG FIX: the counter was never incremented, so the "> 15" cap
            # above could never trigger and failures retried forever.
            num_attempts += 1
            time.sleep(2)
Beispiel #45
0
class SysinfoNamespace(RockstorIO):
    """Socket.io namespace that streams system information to connected
    clients: storage state, package updates, Rock-on metadata, kernel info,
    log pruning, uptime, localtime, distro info and pool health, each in
    its own spawned greenlet loop."""

    # Gates all the background emitter loops below; set on connect,
    # cleared on disconnect.
    start = False
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION
    os_distro_name = settings.OS_DISTRO_NAME

    # This function is run once on every connection
    def on_connect(self, sid, environ):
        """Acknowledge the client and spawn one greenlet per emitter."""
        self.aw = APIWrapper()
        self.emit("connected", {
            "key": "sysinfo:connected",
            "data": "connected"
        })
        self.start = True
        self.spawn(self.update_storage_state, sid)
        self.spawn(self.update_check, sid)
        self.spawn(self.yum_updates, sid)
        self.spawn(self.update_rockons, sid)
        self.spawn(self.send_kernel_info, sid)
        self.spawn(self.prune_logs, sid)
        self.spawn(self.send_localtime, sid)
        self.spawn(self.send_uptime, sid)
        self.spawn(self.send_distroinfo, sid)
        self.spawn(self.shutdown_status, sid)
        self.spawn(self.pool_degraded_status, sid)
        self.spawn(self.pool_dev_stats, sid)

    # Run on every disconnect
    def on_disconnect(self, sid):
        """Release per-session resources and stop the emitter loops."""
        self.cleanup(sid)
        self.start = False

    def send_uptime(self):
        """Emit system uptime every 60 seconds while a client is connected."""
        while self.start:
            self.emit("uptime", {"key": "sysinfo:uptime", "data": uptime()})
            gevent.sleep(60)

    def send_distroinfo(self):
        """Emit distro name/version every 10 minutes."""
        while self.start:
            data = {"distro": self.os_distro_name, "version": distro.version()}
            self.emit("distro_info", {
                "key": "sysinfo:distro_info",
                "data": data
            })
            gevent.sleep(600)

    def send_localtime(self):
        """Emit the server's local wall-clock time every 40 seconds."""
        while self.start:

            self.emit(
                "localtime",
                {
                    "key": "sysinfo:localtime",
                    "data": time.strftime("%H:%M (%z %Z)")
                },
            )
            gevent.sleep(40)

    def send_kernel_info(self):
        """Emit running-kernel info once; on failure emit an error event."""
        try:
            self.emit(
                "kernel_info",
                {
                    "key": "sysinfo:kernel_info",
                    "data": kernel_info(self.supported_kernel),
                },
            )
            # kernel_info() in above raises an Exception if the running
            # kernel != supported kernel and so:
        except Exception as e:
            logger.error("Exception while gathering kernel info: %s" %
                         e.__str__())
            # Emit an event to the front end to capture error report
            self.emit("kernel_error", {
                "key": "sysinfo:kernel_error",
                "data": str(e)
            })
            self.error("unsupported_kernel", str(e))

    def update_rockons(self):
        """Best-effort one-shot refresh of Rock-on metadata via the API."""
        try:
            self.aw.api_call("rockons/update",
                             data=None,
                             calltype="post",
                             save_error=False)
        except Exception as e:
            logger.error("failed to update Rock-on metadata. low-level "
                         "exception: %s" % e.__str__())

    def update_storage_state(self):
        # update storage state once a minute as long as
        # there is a client connected.
        while self.start:
            # Each entry is one refresh endpoint plus its log messages;
            # failures are logged per-endpoint and do not stop the loop.
            resources = [
                {
                    "url": "disks/scan",
                    "success": "Disk state updated successfully",
                    "error": "Failed to update disk state.",
                },
                {
                    "url": "commands/refresh-pool-state",
                    "success": "Pool state updated successfully",
                    "error": "Failed to update pool state.",
                },
                {
                    "url": "commands/refresh-share-state",
                    "success": "Share state updated successfully",
                    "error": "Failed to update share state.",
                },
                {
                    "url": "commands/refresh-snapshot-state",
                    "success": "Snapshot state updated successfully",
                    "error": "Failed to update snapshot state.",
                },
            ]
            for r in resources:
                try:
                    self.aw.api_call(r["url"],
                                     data=None,
                                     calltype="post",
                                     save_error=False)
                except Exception as e:
                    logger.error("%s. exception: %s" %
                                 (r["error"], e.__str__()))
            gevent.sleep(60)

    def update_check(self):
        """Emit the Rockstor package update-check result once."""
        uinfo = rockstor_pkg_update_check()
        self.emit("software_update", {
            "key": "sysinfo:software_update",
            "data": uinfo
        })

    def yum_updates(self):
        """Emit pending OS package updates every 30 minutes."""
        while self.start:
            packages = pkg_update_check()
            data = {}
            if packages:  # Non empty lists are True.
                data["yum_updates"] = True
            else:
                data["yum_updates"] = False
            data["packages"] = packages
            self.emit("yum_updates", {
                "key": "sysinfo:yum_updates",
                "data": data
            })
            gevent.sleep(1800)  # 1800 seconds = 30 mins

    def on_runyum(self, sid):
        """Spawn a greenlet that triggers OS package updates via the API
        and emits the cleared update state on success."""
        def launch_yum():

            try:
                data = {"yum_updating": False, "yum_updates": False}
                self.aw.api_call("commands/update",
                                 data=None,
                                 calltype="post",
                                 save_error=False)
                self.emit("yum_updates", {
                    "key": "sysinfo:yum_updates",
                    "data": data
                })
            except Exception as e:
                logger.error("Unable to perform Package Updates: %s" %
                             e.__str__())

        self.spawn(launch_yum, sid)

    def prune_logs(self):
        """Ask the API to prune task logs once an hour."""
        while self.start:
            self.aw.api_call("sm/tasks/log/prune",
                             data=None,
                             calltype="post",
                             save_error=False)
            gevent.sleep(3600)

    def shutdown_status(self):
        """Emit pending-shutdown status (from systemd-shutdownd) every 30s."""
        while self.start:
            data = {}
            output, error, return_code = service_status("systemd-shutdownd")
            data["status"] = return_code
            if return_code == 0:
                # Service active: extract the human-readable Status line.
                for row in output:
                    if re.search("Status", row) is not None:
                        data["message"] = row.split(":", 1)[1]

            self.emit("shutdown_status", {
                "key": "sysinfo:shutdown_status",
                "data": data
            })

            gevent.sleep(30)

    def pool_degraded_status(self):
        """Emit degraded-pool status every 30 seconds, naming managed
        degraded pools and counting unimported ones."""
        # Examples of data.message:
        # "Pools found degraded: (2) unimported"
        # "Pools found degraded: (rock-pool)"
        # "Pools found degraded: (rock-pool, rock-pool-3)"
        # "Pools found degraded: (rock-pool, rock-pool-3), plus (1) unimported"
        while self.start:
            data = {"status": "OK"}
            deg_pools_count = degraded_pools_found()
            if deg_pools_count > 0:
                data["status"] = "degraded"
                data["message"] = "Pools found degraded: "
                labels = []
                for p in Pool.objects.all():
                    if p.has_missing_dev:
                        deg_pools_count -= 1
                        labels.append(p.name)
                if labels != []:
                    data["message"] += "({})".format(", ".join(labels))
                if deg_pools_count > 0:
                    # we have degraded un-managed pools, add this info
                    if labels != []:
                        data["message"] += ", plus "
                    data["message"] += "({}) unimported".format(
                        deg_pools_count)

            self.emit(
                "pool_degraded_status",
                {
                    "key": "sysinfo:pool_degraded_status",
                    "data": data
                },
            )

            gevent.sleep(30)

    def pool_dev_stats(self):
        """Emit per-pool device-error status every 30 seconds."""
        # Examples of data.message:
        # "Pools found with device errors: (rock-pool)"
        # "Pools found with device errors: (rock-pool, rock-pool-3)"
        # TODO: Consider blending into the existing pool_degraded_status()
        # TODO: to reduce overheads of looping through pools again.
        # TODO: The combined emitter could be called pool_health_status().
        while self.start:
            data = {"status": "OK"}
            labels = []
            for p in Pool.objects.all():
                if not p.dev_stats_ok:
                    labels.append(p.name)
            if labels != []:
                data["status"] = "errors"
                data["message"] = "Pools found with device errors: "
                data["message"] += "({})".format(", ".join(labels))

            self.emit("pool_dev_stats", {
                "key": "sysinfo:pool_dev_stats",
                "data": data
            })

            gevent.sleep(30)
class PincardManagerNamespace(RockstorIO):
    """Socket.io namespace managing Pincard creation and pincard-based
    password resets; per-session state is cleared on disconnect."""

    def on_connect(self, sid, environ):
        """Greet the client and set up the REST API wrapper."""
        self.aw = APIWrapper()
        self.emit('pincardwelcome',
                  {
                      'key': 'pincardManager:pincardwelcome',
                      'data': 'Welcome to Rockstor PincardManager'
                  })

    def on_disconnect(self, sid):
        """Clear all per-session reset state, then run namespace cleanup."""
        self.pins_user_uname = None
        self.pins_user_uid = None
        self.pins_check = None
        self.pass_reset_time = None
        self.otp = 'none'
        self.cleanup(sid)

    def on_generatepincard(self, sid, uid):
        """Spawn a greenlet that creates a pincard for *uid* and emits it."""

        def create_pincard(uid):

            try:
                url = 'pincardmanager/create/%s' % uid
                new_pincard = self.aw.api_call(url, data=None, calltype='post',
                                               save_error=False)
                self.emit('newpincard',
                          {'key': 'pincardManager:newpincard',
                           'data': new_pincard})
            except Exception as e:
                logger.error('Failed to create Pincard with '
                             'exception: %s' % e.__str__())

        self.spawn(create_pincard, sid, uid)

    def on_haspincard(self, sid, user):
        """Spawn a greenlet that checks whether *user* has a usable
        pincard and, if so, primes this session for a password reset."""

        def check_has_pincard(user):

            pins = []
            otp = False
            self.pins_check = []
            self.otp = 'none'
            # Convert from username to uid and if user exist check for
            # pincardManager We don't tell to frontend if a user exists or not
            # to avoid exposure to security flaws/brute forcing etc
            uid = username_to_uid(user)
            user_exist = True if uid is not None else False
            user_has_pincard = False
            # If user exists we check if has a pincard
            if user_exist:
                user_has_pincard = has_pincard(uid)
            # If user is root / uid 0 we check also if email notifications are
            # enabled If not user won't be able to reset password with pincard
            if uid == 0:
                user_has_pincard = user_has_pincard and email_notification_enabled()  # noqa E501

            if user_has_pincard:
                self.pins_user_uname = user
                self.pins_user_uid = uid
                pins = reset_random_pins(uid)
                for pin in pins:
                    self.pins_check.append(pin['pin_number'])

                # Set current time, user will have max 3 min to reset password
                self.pass_reset_time = datetime.now()

                if uid == 0:
                    self.otp = generate_otp(user)
                    otp = True

            self.emit('haspincard',
                      {
                          'key': 'pincardManager:haspincard',
                          'has_pincard': user_has_pincard,
                          'pins_check': pins,
                          'otp': otp
                      })

        self.spawn(check_has_pincard, sid, user)

    def on_passreset(self, sid, pinlist, otp='none'):
        """Spawn a greenlet that validates OTP, deadline and pins, then
        asks the API to reset the password and emits the outcome."""

        def password_reset(pinlist, otp):

            reset_status = False
            reset_response = None

            # On pass reset first we check for otp If not required none = none,
            # otherwhise sent val has to match stored one
            if otp == self.otp:

                # If otp is ok we check for elapsed time to be < 3 mins
                # NOTE(review): assumes on_haspincard ran first and set
                # pass_reset_time — confirm frontend enforces that order.
                elapsed_time = (datetime.now()-self.pass_reset_time).total_seconds()  # noqa E501
                if elapsed_time < 180:

                    # If received pins equal expected pins, check for values
                    # via reset_password func
                    if all(int(key) in self.pins_check for key in pinlist):
                        data = {'uid': self.pins_user_uid,
                                'pinlist': pinlist}
                        url = 'pincardmanager/reset/%s' % self.pins_user_uname
                        headers = {'content-type': 'application/json'}
                        reset_data = self.aw.api_call(url, data=data,
                                                      calltype='post',
                                                      headers=headers,
                                                      save_error=False)
                        reset_response = reset_data['response']
                        reset_status = reset_data['status']
                    else:
                        reset_response = ('Received pins set differs from '
                                          'expected one. Password reset '
                                          'denied')
                else:
                    reset_response = ('Pincard 3 minutes reset time has '
                                      'expired. Password reset denied')
            else:
                reset_response = ('Sent OTP doesn\'t match. Password reset '
                                  'denied')

            self.emit('passresetresponse',
                      {
                          'key': 'pincardManager:passresetresponse',
                          'response': reset_response,
                          'status': reset_status
                      })

        self.spawn(password_reset, sid, pinlist, otp)
Beispiel #47
0
from storageadmin.models import (RockOn, DContainer, DVolume, DPort,
                                 DCustomConfig, DContainerLink,
                                 ContainerOption, DContainerEnv,
                                 DContainerDevice, DContainerArgs)
from fs.btrfs import mount_share
from rockon_utils import container_status
import logging

# Path to the docker CLI binary used to manage Rock-on containers.
DOCKER = '/usr/bin/docker'
# Local Rockstor REST endpoint for Rock-on management.
ROCKON_URL = 'https://localhost/api/rockons'
# Base docker invocation; DCMD2 extends it to run detached with a
# restart policy for long-lived Rock-on containers.
DCMD = [DOCKER, 'run', ]
DCMD2 = list(DCMD) + ['-d', '--restart=unless-stopped', ]


logger = logging.getLogger(__name__)
# Shared API client used by the helpers in this module.
aw = APIWrapper()


def docker_status():
    """Return True if the docker service reports as running, else False.

    Wraps service_status('docker') and maps its exit code (0 == running)
    to a boolean.
    """
    o, e, rc = service_status('docker')
    # Exit code 0 means the service is active; anything else is not running.
    return rc == 0


def rockon_status(name):
    """Return the status of the named Rock-on.

    A Rock-on may define a bespoke status function in this module named
    '<rockon>_status'; if present it is used. Otherwise we fall back to
    the docker status of the Rock-on's last-launched container.
    """
    rockon = RockOn.objects.get(name=name)
    custom_status = globals().get('%s_status' % rockon.name.lower())
    if custom_status is not None:
        return custom_status(rockon)
    last_container = DContainer.objects.filter(
        rockon=rockon).order_by('-launch_order')[0]
    return container_status(last_container.name)
Beispiel #48
0
        print ('BTRFS device scan failed due to an exception. This indicates '
               'a serious problem. Aborting. Exception: %s' % e.__str__())
        sys.exit(1)
    print('BTRFS device scan complete')

    #if the appliance is not setup, there's nothing more to do beyond
    #device scan
    setup = Setup.objects.first()
    if (setup is None or setup.setup_user is False):
        print('Appliance is not yet setup.')
        return

    num_attempts = 0
    while True:
        try:
            aw = APIWrapper()
            aw.api_call('network')
            aw.api_call('commands/bootstrap', calltype='post')
            break
        except Exception, e:
            #Retry on every exception, primarily because of django-oauth related
            #code behaving unpredictably while setting tokens. Retrying is a
            #decent workaround for now(11302015).
            if (num_attempts > 15):
                print('Max attempts(15) reached. Connection errors persist. '
                      'Failed to bootstrap. Error: %s' % e.__str__())
                sys.exit(1)
            print('Exception occured while bootstrapping. This could be because '
                  'rockstor.service is still starting up. will wait 2 seconds '
                  'and try again. Exception: %s' % e.__str__())
            time.sleep(2)
Beispiel #49
0
 def initialize(self):
     self.aw = APIWrapper()
 def initialize(self):
     self.aw = APIWrapper()
     logger.debug("Sysinfo has been initialized")
class SysinfoNamespace(BaseNamespace, BroadcastMixin):
    """Legacy socketio namespace pushing system info (uptime, kernel info,
    storage/Rock-on/update state) to the frontend via gevent greenlets."""

    # Gates the emitter loops; set on connect, cleared on disconnect.
    start = False
    supported_kernel = settings.SUPPORTED_KERNEL_VERSION

    # Called before the connection is established
    def initialize(self):
        """Create the API wrapper used by all emitters."""
        self.aw = APIWrapper()
        logger.debug("Sysinfo has been initialized")

    # This function is run once on every connection
    def recv_connect(self):
        """Acknowledge the connection and spawn the background emitters."""
        logger.debug("Sysinfo has connected")
        self.emit("sysinfo:sysinfo", {
            "key": "sysinfo:connected", "data": "connected"
        })
        self.start = True
        gevent.spawn(self.refresh_system)
        gevent.spawn(self.send_uptime)
        gevent.spawn(self.send_kernel_info)

    # Run on every disconnect
    def recv_disconnect(self):
        """Stop the emitter loops and tear down the transport."""
        logger.debug("Sysinfo has disconnected")
        self.start = False
        self.disconnect()

    def send_uptime(self):
        """Emit system uptime every 30 seconds while connected."""
        # Seems redundant
        while self.start:
            self.emit('sysinfo:uptime', {
                'data': uptime(), 'key': 'sysinfo:uptime'
            })
            gevent.sleep(30)

    def send_kernel_info(self):
        """Emit running-kernel info once; kernel_info raises when the
        running kernel is unsupported, in which case an error event is
        emitted instead."""
        try:
            self.emit('sysinfo:kernel_info', {
                'data': kernel_info(self.supported_kernel),
                'key': 'sysinfo:kernel_info'
            })
        except Exception as e:
            logger.error('Exception while gathering kernel info: %s'
                         % e.__str__())
            # Emit an event to the front end to capture error report
            self.emit('sysinfo:kernel_error', {
                'data': str(e),
                'key': 'sysinfo:kernel_error'
            })
            self.error('unsupported_kernel', str(e))

    def refresh_system(self):
        """Refresh storage, Rock-on and update state unless a refresh ran
        within the configured scan interval."""
        cur_ts = datetime.utcnow()
        if ((cur_ts - self.environ['scan_ts']).total_seconds()
                < self.environ['scan_interval']):
            logger.debug('Skipping system state refresh as it was done less '
                         'than %d seconds ago.' % self.environ['scan_interval'])
            return
        self.update_storage_state()
        self.update_rockons()
        self.update_check()

    def update_rockons(self):
        """Best-effort refresh of Rock-on metadata via the API."""
        try:
            self.aw.api_call('rockons/update', data=None, calltype='post',
                             save_error=False)
            logger.debug('Updated Rock-on metadata.')
        # Normalized from the py2-only "except Exception, e" form for
        # consistency with send_kernel_info above (py3 compatibility).
        except Exception as e:
            logger.debug('failed to update Rock-on metadata. low-level '
                         'exception: %s' % e.__str__())
Beispiel #52
0
                                 ContainerOption, DContainerEnv)
from fs.btrfs import mount_share
from system.pkg_mgmt import install_pkg
from rockon_utils import container_status
from rockon_discourse import (discourse_install, discourse_uninstall,
                              discourse_stop, discourse_start, discourse_status)


# Path to the docker CLI binary used to manage Rock-on containers.
DOCKER = '/usr/bin/docker'
# Local Rockstor REST endpoint for Rock-on management.
ROCKON_URL = 'https://localhost/api/rockons'
# Base docker invocation (syslog logging); DCMD2 extends it to run
# detached with a bounded restart policy.
DCMD = [DOCKER, 'run', '--log-driver=syslog', ]
DCMD2 = list(DCMD) + ['-d', '--restart=on-failure:5', ]

import logging
logger = logging.getLogger(__name__)
# Shared API client used by the helpers in this module.
aw = APIWrapper()


def docker_status():
    """Report whether the docker service is currently running."""
    out, err, exit_code = service_status('docker')
    # A zero exit code from the service query means docker is active.
    return exit_code == 0


def rockon_status(name):
    ro = RockOn.objects.get(name=name)
    if (globals().get('%s_status' % ro.name.lower()) is not None):
        return globals().get('%s_status' % ro.name.lower())(ro)
    state = 'unknown error'
    co = DContainer.objects.filter(rockon=ro).order_by('-launch_order')[0]