Пример #1
0
def fill_up_share(pname, sname, chunk=(1024 * 1024 * 2)):
    """Write files into Share `sname` on pool `pname` until its btrfs quota
    is exceeded.

    Files named file-<N> (N continuing past the highest existing index) are
    created under /mnt2/<pname>/<sname>; each file receives 100 writes of
    `chunk` bytes. The loop stops on an IOError containing 'Disk quota
    exceeded'; any other IOError is re-raised. Usage is printed after each
    file so progress toward the quota is visible.
    """
    so = Share.objects.get(name=sname)
    rusage, eusage = share_usage(so.pool, so.qgroup)
    print('Writing to Share(%s) until quota is exceeded.' % sname)
    print('Share(%s) Size: %d Usage: %d' % (sname, so.size, rusage))
    spath = '/mnt2/%s/%s' % (pname, sname)
    # Resume numbering after the highest existing file-<N> in the share.
    file_indices = sorted([int(f.split('-')[1]) for f in os.listdir(spath)],
                          reverse=True)
    counter = 0
    if (len(file_indices) > 0):
        counter = file_indices[0] + 1
    quota_exceeded = False
    while (not quota_exceeded):
        fname = '%s/file-%d' % (spath, counter)
        # NOTE(review): despite the name, this buffer is `chunk` bytes
        # (2 MiB by default), not 1 MiB.
        one_mb = 's' * chunk
        try:
            with open(fname, 'w') as ofo:
                for i in range(100):
                    ofo.write(one_mb)
        except IOError as e:
            if (re.search('Disk quota exceeded', e.__str__()) is not None):
                print(e.__str__())
                quota_exceeded = True
            else:
                raise e

        # sync so qgroup accounting reflects the writes before re-reading
        # usage below.
        run_command(['/usr/bin/sync'])
        rusage, eusage = share_usage(so.pool, so.qgroup)
        print('Share(%s) Size: %d Usage: %d' % (sname, so.size, rusage))
        counter += 1
Пример #2
0
def shares_info(mnt_pt):
    """Return a dict mapping share names under this mount point to their
    '0/<id>' qgroup ids.

    Snapshots, subvolumes of snapshots and nested subvolumes of shares are
    all excluded. Useful to gather the names of all shares in a pool.
    """
    out, err, rc = run_command([BTRFS, 'subvolume', 'list', '-s', mnt_pt])
    snap_ids = [line.split()[1] for line in out
                if re.match('ID ', line) is not None]

    out, err, rc = run_command([BTRFS, 'subvolume', 'list', '-p', mnt_pt])
    shares_d = {}
    share_ids = []
    for line in out:
        if re.match('ID ', line) is None:
            continue
        fields = line.split()
        vol_id = fields[1]
        if vol_id in snap_ids:
            # snapshot -- not a share
            continue
        parent_id = fields[5]
        if parent_id in share_ids:
            # subvol of a subvol: track it so its own children are also
            # ignored.
            share_ids.append(vol_id)
        elif parent_id in snap_ids:
            # snapshot (or subvol of a snapshot): track for the same reason.
            snap_ids.append(vol_id)
        else:
            shares_d[fields[-1]] = '0/%s' % vol_id
            share_ids.append(vol_id)
    return shares_d
Пример #3
0
def main():
    """Destroy stale 0/<id> qgroups (no matching subvolume) on every pool.

    Pools without quotas enabled are skipped; qgroup 0/5 (the top-level
    subvolume) is always left alone. An error on one pool is logged and does
    not stop processing of the remaining pools.
    """
    for p in Pool.objects.all():
        try:
            print("Processing pool(%s)" % p.name)
            mnt_pt = mount_root(p)
            o, e, rc = run_command([BTRFS, "subvol", "list", mnt_pt])
            subvol_ids = []
            for l in o:
                if re.match("ID ", l) is not None:
                    subvol_ids.append(l.split()[1])

            o, e, rc = run_command([BTRFS, "qgroup", "show", mnt_pt], throw=False)
            if rc != 0:
                print("Quotas not enabled on pool(%s). Skipping it." % p.name)
                continue

            qgroup_ids = []
            for l in o:
                if re.match("0/", l) is not None:
                    q = l.split()[0].split("/")[1]
                    if q == "5":
                        # 0/5 belongs to the default/top-level subvolume.
                        continue
                    # reuse `q` instead of re-splitting the line.
                    qgroup_ids.append(q)

            for q in qgroup_ids:
                if q not in subvol_ids:
                    print("qgroup %s not in use. deleting" % q)
                    run_command([BTRFS, "qgroup", "destroy", "0/%s" % q, mnt_pt])
                else:
                    print("qgroup %s is in use. Moving on." % q)
            print("Finished processing pool(%s)" % p.name)
        except Exception as e:
            # `as e` keeps this valid on Python 3 (was the Python-2-only
            # `except Exception, e`).
            print("Exception while qgroup-cleanup of Pool(%s): %s" % (p.name, e))
Пример #4
0
def snaps_info(mnt_pt, share_name):
    """Return {snap_name: ('0/<subvol_id>', writable)} for snapshots of
    `share_name` under the mount point `mnt_pt`.

    Raises Exception if the share itself cannot be found in the subvolume
    listing.
    """
    o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-u', '-p', '-q', mnt_pt])
    # First pass: locate the share's subvolume id and uuid.
    # Positional field indices assume the column layout of
    # `btrfs subvolume list -u -p -q` -- TODO confirm against the deployed
    # btrfs-progs version.
    share_id = share_uuid = None
    for l in o:
        if (re.match('ID ', l) is not None):
            fields = l.split()
            if (fields[-1] == share_name):
                share_id = fields[1]
                share_uuid = fields[12]
    if (share_id is None):
        raise Exception('Failed to get uuid of the share(%s) under mount(%s)'
                        % (share_name, mnt_pt))

    # Second pass: list only snapshots (-s) and keep those parented to the
    # share (matched by parent id or parent uuid).
    o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', '-p', '-q',
                            mnt_pt])
    snaps_d = {}
    for l in o:
        if (re.match('ID ', l) is not None):
            fields = l.split()
            #parent uuid must be share_uuid
            if (fields[7] != share_id and fields[15] != share_uuid):
                continue
            # A snapshot is writable unless its btrfs `ro` property is true.
            writable = True
            o1, e1, rc1 = run_command([BTRFS, 'property', 'get',
                                       '%s/%s' % (mnt_pt, fields[-1])])
            for l1 in o1:
                if (re.match('ro=', l1) is not None):
                    if (l1.split('=')[1] == 'true'):
                        writable = False
            snap_name = fields[-1].split('/')[-1]
            snaps_d[snap_name] = ('0/%s' % fields[1], writable)
    return snaps_d
Пример #5
0
def init_update_issue():
    """Rewrite /etc/issue with the machine's current ip address, or with a
    help message when no address is available yet.

    Returns the detected ip address, or None.
    """
    default_if = None
    ipaddr = None
    out, err, rc = run_command(['/usr/sbin/route'])
    # The interface of the default route is the last column of its row.
    for line in out:
        if re.match('default', line) is not None:
            default_if = line.split()[-1]
    if default_if is not None:
        out2, err, rc = run_command(['/usr/sbin/ifconfig', default_if])
        for line in out2:
            if re.match('inet ', line.strip()) is not None:
                ipaddr = line.split()[1]
    with open('/etc/issue', 'w') as ifo:
        if ipaddr is None:
            ifo.write('The system does not yet have an ip address.\n')
            ifo.write('Rockstor cannot be configured using the web interface '
                      'without this.\n\n')
            ifo.write('Press Enter to receive updated network status\n')
            ifo.write('If this message persists login as root and configure '
                      'your network manually to proceed further.\n')
        else:
            ifo.write('\nRockstor is successfully installed.\n\n')
            ifo.write('You can access the web-ui by pointing your browser to '
                      'https://%s\n\n' % ipaddr)
    return ipaddr
Пример #6
0
def main():
    """Remove size limits from every parented qgroup on each quota-enabled
    pool.

    Pools without quotas enabled are skipped; errors on one pool are printed
    and do not stop processing of the others.
    """
    for p in Pool.objects.all():
        try:
            print("Processing pool(%s)" % p.name)
            mnt_pt = mount_root(p)
            o, e, rc = run_command([BTRFS, "qgroup", "show", "-p", mnt_pt], throw=False)
            if rc != 0:
                print("Quotas not enabled on pool(%s). Skipping it." % p.name)
                continue

            qgroup_ids = []
            for l in o:
                # skip the header and separator rows of `btrfs qgroup show`
                if re.match("qgroupid", l) is not None or re.match("-------", l) is not None:
                    continue
                cols = l.strip().split()
                if len(cols) != 4:
                    # fixed message typo (was "unexcepted")
                    print("Ignoring unexpected line(%s)." % l)
                    continue
                if cols[3] == "---":
                    print("No parent qgroup for %s" % l)
                    continue
                qgroup_ids.append(cols[3])

            for q in qgroup_ids:
                print("relaxing the limit on qgroup %s" % q)
                run_command([BTRFS, "qgroup", "limit", "none", q, mnt_pt])

            print("Finished processing pool(%s)" % p.name)
        except Exception as e:
            # `as e` keeps this valid on Python 3 (was the Python-2-only
            # `except Exception, e`).
            print("Exception while qgroup-maxout of Pool(%s): %s" % (p.name, e))
Пример #7
0
def qgroup_assign(qid, pqid, mnt_pt):
    """Assign qgroup `qid` under parent qgroup `pqid` on `mnt_pt`.

    Returns True immediately if the assignment already exists. The
    quota-inconsistency warning from newer btrfs-progs is handled by
    scheduling a rescan; any other CommandException is logged and re-raised.
    """
    if (qgroup_is_assigned(qid, pqid, mnt_pt)):
        return True

    # since btrfs-progs 4.2, qgroup assign succeeds but throws a warning:
    # "WARNING: # quotas may be inconsistent, rescan needed" and returns with
    # exit code 1.
    try:
        run_command([BTRFS, 'qgroup', 'assign', qid, pqid, mnt_pt])
    except CommandException as e:
        # Match the exact first stderr line; other rc==1 failures fall
        # through to the log-and-raise path below.
        wmsg = 'WARNING: quotas may be inconsistent, rescan needed'
        if (e.rc == 1 and e.err[0] == wmsg):
            # schedule a rescan if one is not currently running.
            dmsg = ('Quota inconsistency while assigning %s. Rescan scheduled.'
                    % qid)
            try:
                run_command([BTRFS, 'quota', 'rescan', mnt_pt])
                # logger.debug returns None, so callers see None on success.
                return logger.debug(dmsg)
            except CommandException as e2:
                # A rescan already in progress also counts as scheduled.
                emsg = 'ERROR: quota rescan failed: Operation now in progress'
                if (e2.rc == 1 and e2.err[0] == emsg):
                    return logger.debug('%s.. Another rescan already in '
                                        'progress.' % dmsg)
                logger.exception(e2)
                raise e2
        logger.exception(e)
        raise e
Пример #8
0
 def post(self, request):
     """Install (or replace) the TLS certificate/key pair named in the
     request.

     All other stored certificates are deleted first. The key and the
     certificate are written to temp files and checked with openssl; a
     failure of either modulus check is reported via handle_exception.
     """
     with self._handle_exception(request):
         name = request.data.get('name')
         cert = request.data.get('cert')
         key = request.data.get('key')
         # Only one certificate is kept at a time.
         TLSCertificate.objects.filter().exclude(name=name).delete()
         co, created = TLSCertificate.objects.get_or_create(
             name=name, defaults={'certificate': cert, 'key': key})
         if (not created):
             co.certificate = cert
             co.key = key
             co.save()
         # mkstemp returns an open OS-level fd; close it and write via the
         # path (the previous code leaked both descriptors by reusing `fo`).
         kfd, kpath = mkstemp()
         os.close(kfd)
         cfd, cpath = mkstemp()
         os.close(cfd)
         with open(kpath, 'w') as kfo, open(cpath, 'w') as cfo:
             kfo.write(key)
             cfo.write(cert)
         try:
             o, e, rc = run_command([OPENSSL, 'rsa', '-noout', '-modulus',
                                     '-in', kpath])
         except Exception as e:
             # `as e` keeps this valid on Python 3 (was `except Exception, e`).
             logger.exception(e)
             e_msg = ('RSA key modulus could not be verified for the given '
                      'Private Key. Correct your input and try again')
             handle_exception(Exception(e_msg), request)
         try:
             o2, e, rc = run_command([OPENSSL, 'x509', '-noout',
                                      '-modulus', '-in', cpath])
         except Exception as e:
             logger.exception(e)
             e_msg = ('RSA key modulus could not be verified for the given '
                      'Certificate. Correct your input and try again')
             handle_exception(Exception(e_msg), request)
Пример #9
0
def main():
    """For every pool, destroy 0/<id> qgroups whose subvolume no longer
    exists. Pools without quotas enabled are skipped; 0/5 is never touched."""
    for pool in Pool.objects.all():
        print('Processing pool(%s)' % pool.name)
        mnt_pt = mount_root(pool)
        out, err, rc = run_command([BTRFS, 'subvol', 'list', mnt_pt])
        subvol_ids = [line.split()[1] for line in out
                      if re.match('ID ', line) is not None]

        out, err, rc = run_command([BTRFS, 'qgroup', 'show', mnt_pt],
                                   throw=False)
        if rc != 0:
            print('Quotas not enabled on pool(%s). Skipping it.' % pool.name)
            continue

        qgroup_ids = []
        for line in out:
            if re.match('0/', line) is None:
                continue
            qid = line.split()[0].split('/')[1]
            # 0/5 is the top-level subvolume's qgroup; leave it alone.
            if qid != '5':
                qgroup_ids.append(qid)

        for qid in qgroup_ids:
            if qid in subvol_ids:
                print('qgroup %s is in use. Moving on.' % qid)
            else:
                print('qgroup %s not in use. deleting' % qid)
                run_command([BTRFS, 'qgroup', 'destroy', '0/%s' % qid, mnt_pt])
        print('Finished processing pool(%s)' % pool.name)
Пример #10
0
def main():
    """Tune the system for an SSD/flash root drive.

    Passing '-x' as the first argument enables debug logging. If the root
    drive supports TRIM an fstrim timer is set up; if it supports TRIM or is
    flash, sysctl is updated, tmpfs is enabled on /tmp, fstab gains noatime
    for /, /home and /boot, and those filesystems are remounted.
    """
    level = logging.INFO
    if len(sys.argv) > 1 and sys.argv[1] == '-x':
        level = logging.DEBUG
    logging.basicConfig(format='%(asctime)s: %(message)s', level=level)
    rd = root_disk()
    logging.debug('Root drive is %s' % rd)
    do_more = False
    if trim_support(rd) is True:
        do_more = True
        logging.info('TRIM support is available for %s' % rd)
        fstrim_systemd()
        logging.debug('Finished setting up fstrim timer')
    do_more = do_more or is_flash(rd)

    if not do_more:
        return

    update_sysctl()
    logging.info('updated sysctl')
    # enable tmpfs on /tmp
    tmpmnt = 'tmp.mount'
    systemctl(tmpmnt, 'enable')
    logging.info('enabled %s' % tmpmnt)
    systemctl(tmpmnt, 'start')
    logging.info('started %s' % tmpmnt)

    # mount with noatime: add it to /, /home and /boot in /etc/fstab,
    # then remount each so it takes effect now.
    update_fstab()
    logging.info('updated fstab')
    for fs in ROOT_FS:
        run_command(['mount', fs, '-o', 'remount'])
        logging.info('remounted %s' % fs)
Пример #11
0
def owncloud_install(rockon):
    """docker-run every container of the owncloud rock-on in launch order.

    The postgres container gets POSTGRES_* credentials from the rock-on's
    custom config; the owncloud container gets the TLS cert/key volumes and
    DB_* credentials. After launching postgres, waits (up to ~300s) until
    psql answers before returning.
    """
    for c in DContainer.objects.filter(rockon=rockon).order_by('launch_order'):
        cmd = list(DCMD2) + ['--name', c.name, ]
        db_user = DCustomConfig.objects.get(rockon=rockon, key='db_user').val
        db_pw = DCustomConfig.objects.get(rockon=rockon, key='db_pw').val
        if (c.dimage.name == 'postgres'):
            cmd.extend(['-e', 'POSTGRES_USER=%s' % db_user, '-e',
                        'POSTGRES_PASSWORD=%s' % db_pw])
        cmd.extend(port_ops(c))
        for lo in DContainerLink.objects.filter(destination=c):
            cmd.extend(['--link', '%s:%s' % (lo.source.name, lo.name)])
        cmd.extend(vol_ops(c))
        if (c.name == 'owncloud'):
            # Mount Rockstor's cert/key into the container and enable https.
            cmd.extend(['-v', '%s/rockstor.key:/etc/ssl/private/owncloud.key' % settings.CERTDIR,
                        '-v', '%s/rockstor.cert:/etc/ssl/certs/owncloud.crt' % settings.CERTDIR,
                        '-e', 'HTTPS_ENABLED=true'])
            cmd.extend(['-e', 'DB_USER=%s' % db_user, '-e', 'DB_PASS=%s' % db_pw,])
        cmd.append(c.dimage.name)
        logger.debug('docker cmd = %s' % cmd)
        run_command(cmd)
        if (c.dimage.name == 'postgres'):
            #make sure postgres is setup
            cur_wait = 0;
            while (True):
                # Poll `psql -c \l` inside the container until it succeeds.
                o, e, rc = run_command([DOCKER, 'exec', c.name, 'psql', '-U',
                                        'postgres', '-c', "\l"], throw=False)
                if (rc == 0):
                    break
                if (cur_wait > 300):
                    logger.error('Waited too long(300 seconds) for '
                                 'postgres to initialize for owncloud. giving up.')
                    break
                time.sleep(1)
                cur_wait += 1
def main():
    """Remove size limits from every parented qgroup on each quota-enabled
    pool (the `btrfs qgroup limit none` pass of qgroup maintenance)."""
    for p in Pool.objects.all():
        print('Processing pool(%s)' % p.name)
        mnt_pt = mount_root(p)
        o, e, rc = run_command([BTRFS, 'qgroup', 'show', '-p', mnt_pt],
                               throw=False)
        if (rc != 0):
            print('Quotas not enabled on pool(%s). Skipping it.' % p.name)
            continue

        qgroup_ids = []
        for l in o:
            # skip the header and separator rows of `btrfs qgroup show`
            if (re.match('qgroupid', l) is not None or
                    re.match('-------', l) is not None):
                continue
            cols = l.strip().split()
            if (len(cols) != 4):
                # fixed message typo (was 'unexcepted')
                print('Ignoring unexpected line(%s).' % l)
                continue
            if (cols[3] == '---'):
                print('No parent qgroup for %s' % l)
                continue
            qgroup_ids.append(cols[3])

        for q in qgroup_ids:
            print('relaxing the limit on qgroup %s' % q)
            run_command([BTRFS, 'qgroup', 'limit', 'none', q, mnt_pt])

        print('Finished processing pool(%s)' % p.name)
Пример #13
0
def backup_config():
    """Dump selected storageadmin/smart_manager models to a timestamped,
    gzipped json file in the config-backup directory.

    Returns the saved ConfigBackup record (filename, md5sum and size).
    """
    models = {'storageadmin':
              ['user', 'group', 'sambashare', 'sambacustomconfig',
               'netatalkshare', 'nfsexport',
               'nfsexportgroup', 'advancednfsexport', ],
              'smart_manager':
              ['service', ], }
    model_list = ['%s.%s' % (app, model)
                  for app in models for model in models[app]]
    logger.debug('model list = %s' % model_list)

    filename = ('backup-%s.json' % datetime.now().strftime('%Y-%m-%d-%H%M%S'))
    cb_dir = ConfigBackup.cb_dir()
    if not os.path.isdir(cb_dir):
        os.mkdir(cb_dir)

    fp = os.path.join(cb_dir, filename)
    with open(fp, 'w') as dfo:
        # default db dump, then the smart_manager db dump, into one file.
        call_command('dumpdata', *model_list, stdout=dfo)
        dfo.write('\n')
        call_command('dumpdata', database='smart_manager', *model_list,
                     stdout=dfo)
    run_command(['/usr/bin/gzip', fp])

    gz_name = ('%s.gz' % filename)
    fp = os.path.join(cb_dir, gz_name)
    cbo = ConfigBackup(filename=gz_name, md5sum=md5sum(fp),
                       size=os.stat(fp).st_size)
    cbo.save()
    return cbo
Пример #14
0
def remove_share(pool, share_name, pqgroup, force=False):
    """
    umount share if its mounted.
    mount root pool
    btrfs subvolume delete root_mnt/vol_name
    umount root pool

    With force=True, subvolumes nested inside the share are deleted first.
    After the share subvolume is deleted, its own 0/<id> qgroup and the
    supplied parent qgroup `pqgroup` are destroyed.
    """
    if (is_share_mounted(share_name)):
        mnt_pt = ('%s%s' % (DEFAULT_MNT_DIR, share_name))
        umount_root(mnt_pt)
    root_pool_mnt = mount_root(pool)
    subvol_mnt_pt = root_pool_mnt + '/' + share_name
    # Nothing to do if the subvolume is already gone.
    if (not is_subvol(subvol_mnt_pt)):
        return
    if (force):
        # Delete child subvolumes first; btrfs cannot delete a subvolume
        # that still contains subvolumes.
        o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-o', subvol_mnt_pt])
        for l in o:
            if (re.match('ID ', l) is not None):
                subvol = root_pool_mnt + '/' + l.split()[-1]
                run_command([BTRFS, 'subvolume', 'delete', subvol], log=True)
    # Capture the share's qgroup id before the subvolume disappears.
    qgroup = ('0/%s' % share_id(pool, share_name))
    delete_cmd = [BTRFS, 'subvolume', 'delete', subvol_mnt_pt]
    run_command(delete_cmd, log=True)
    qgroup_destroy(qgroup, root_pool_mnt)
    return qgroup_destroy(pqgroup, root_pool_mnt)
Пример #15
0
def snaps_info(mnt_pt, share_name):
    """Return {snap_name: ('0/<subvol_id>', writable)} for all snapshots of
    `share_name` under `mnt_pt`, including snapshots of snapshots.

    Returns an empty dict when the share itself is not found. Positional
    field indices assume the column layout of the `btrfs subvolume list`
    invocations below -- TODO confirm against the deployed btrfs-progs
    version.
    """
    o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-u', '-p', '-q', mnt_pt])
    # First pass: find the share's subvolume id and uuid.
    share_id = share_uuid = None
    for l in o:
        if (re.match('ID ', l) is not None):
            fields = l.split()
            if (fields[-1] == share_name):
                share_id = fields[1]
                share_uuid = fields[12]
    if (share_id is None): return {}

    # Second pass over snapshots only (-s), matching parent id/uuid against
    # the share or any snapshot already collected.
    o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', '-p', '-q',
                            '-u', mnt_pt])
    snaps_d = {}
    snap_uuids = []
    for l in o:
        if (re.match('ID ', l) is not None):
            fields = l.split()
            # parent uuid must be share_uuid or another snapshot's uuid
            if (fields[7] != share_id and fields[15] != share_uuid and
                fields[15] not in snap_uuids):
                continue
            snap_name, writable = parse_snap_details(mnt_pt, fields)
            snaps_d[snap_name] = ('0/%s' % fields[1], writable, )
            # we rely on the observation that child snaps are listed after their
            # parents, so no need to iterate through results separately.
            # Instead, we add the uuid of a snap to the list and look up if
            # it's a parent of subsequent entries.
            snap_uuids.append(fields[17])

    return snaps_d
Пример #16
0
def qgroup_destroy(qid, mnt_pt):
    """Destroy qgroup `qid` on `mnt_pt` if it currently exists.

    Returns run_command's result on success, or False when `qid` is not
    present in `btrfs qgroup show` output.
    """
    o, e, rc = run_command([BTRFS, 'qgroup', 'show', mnt_pt])
    for l in o:
        # Compare the exact qgroupid column. The previous re.match(qid, l)
        # pre-filter treated qid as an unescaped regex pattern and was
        # redundant given this equality check.
        if l.split()[0] == qid:
            return run_command([BTRFS, 'qgroup', 'destroy', qid, mnt_pt],
                               log=True)
    return False
Пример #17
0
def start_balance(mnt_pt, force=False, convert=None):
    """Kick off a btrfs balance of `mnt_pt`, optionally forced and/or
    converting data and metadata to the given profile."""
    cmd = ['btrfs', 'balance', 'start', mnt_pt]
    opts = []
    if force:
        opts.append('-f')
    if convert is not None:
        # conversion flags go before -f, matching the original insert order.
        opts = ['-mconvert=%s' % convert, '-dconvert=%s' % convert] + opts
    cmd[3:3] = opts
    run_command(cmd)
Пример #18
0
def update_quota(pool_name, pool_device, qgroup, size_bytes):
    """Apply a size limit to `qgroup` on the given pool; returns the limit
    command's (out, err, rc)."""
    device = '/dev/' + pool_device
    mnt = mount_root(pool_name, device)
    result = run_command([BTRFS, 'qgroup', 'limit', size_bytes, qgroup, mnt])
    # sync before unmounting; umount fails without it.
    run_command(SYNC)
    umount_root(mnt)
    return result
Пример #19
0
def start_balance(mnt_pt, force=False, convert=None):
    """Start a btrfs balance of `mnt_pt`; optionally force it and/or convert
    data and metadata to the profile named by `convert`."""
    cmd = ["btrfs", "balance", "start", mnt_pt]
    extra = []
    if force:
        extra.append("-f")
    if convert is not None:
        # conversion flags precede -f, matching the original insert order.
        extra = ["-mconvert=%s" % convert, "-dconvert=%s" % convert] + extra
    cmd[3:3] = extra
    run_command(cmd)
Пример #20
0
def handle_exception(e, request):
    """Log the failing request and exception, archive the logs, then
    re-raise as a RockStorAPIException."""
    logger.error('request path: %s method: %s data: %s' %
                 (request.path, request.method, request.DATA))
    logger.exception('exception: %s' % e.__str__())
    error_tgz = settings.ROOT_DIR + 'src/rockstor/logs/error.tgz'
    run_command(['/usr/bin/tar', '-c', '-z', '-f', error_tgz,
                 settings.ROOT_DIR + 'var/log'])
    raise RockStorAPIException(detail=e.__str__())
Пример #21
0
def mount_root(pool_name, device):
    """Mount the pool's btrfs filesystem at DEFAULT_MNT_DIR + pool_name and
    return that mount point (short-circuits if already mounted)."""
    mnt = DEFAULT_MNT_DIR + pool_name
    if is_share_mounted(pool_name):
        return mnt
    create_tmp_dir(mnt)
    run_command([MOUNT, '-t', 'btrfs', device, mnt])
    return mnt
Пример #22
0
def generic_install(rockon):
    """docker-run every container of the rock-on in launch order, with its
    volumes, ports and container options."""
    for container in DContainer.objects.filter(
            rockon=rockon).order_by('launch_order'):
        docker_cmd = list(DCMD2) + ['--name', container.name]
        docker_cmd += vol_ops(container)
        docker_cmd += port_ops(container)
        docker_cmd += container_ops(container)
        docker_cmd.append(container.dimage.name)
        run_command(docker_cmd)
Пример #23
0
def update_sasl(smtp_server, sender, password, revert=False):
    """(Re)write postfix's sasl_passwd with the relay credentials, or empty
    it when revert=True, then postmap it.

    Both the text file and the generated .db are kept owner-readable only.
    """
    sasl_file = '/etc/postfix/sasl_passwd'
    with open(sasl_file, 'w') as fo:
        if not revert:
            fo.write('[%s]:587 %s:%s\n' % (smtp_server, sender, password))
    # 0o-prefixed octal literals are valid on Python 2.6+ AND Python 3;
    # the bare `0400`/`0600` forms are a SyntaxError on Python 3.
    os.chmod(sasl_file, 0o400)
    run_command([POSTMAP, sasl_file])
    os.chmod('%s.db' % sasl_file, 0o600)
Пример #24
0
def switch_quota(pool_name, device, flag='enable'):
    """Enable or disable btrfs quotas on the pool; returns the quota
    command's (out, err, rc)."""
    mnt = mount_root(pool_name, device)
    result = run_command([BTRFS, 'quota', flag, mnt])
    # hack: sync before unmounting -- umount fails without it.
    run_command(SYNC)
    umount_root(mnt)
    return result
Пример #25
0
def transmission_install(rockon):
    """docker-run the transmission container with its custom config values
    exported as environment variables, plus its volumes and ports."""
    container = DContainer.objects.get(rockon=rockon, launch_order=1)
    cmd = list(DCMD2) + ['--name', container.name]
    for cfg in DCustomConfig.objects.filter(rockon=rockon):
        cmd += ['-e', '%s=%s' % (cfg.key, cfg.val)]
    cmd += vol_ops(container)
    cmd += port_ops(container)
    cmd.append(container.dimage.name)
    run_command(cmd)
Пример #26
0
def generic_start(rockon):
    """Start every container of the rock-on in launch order; records
    'start_failed' in new_status if any container fails to start."""
    new_status = 'started'
    try:
        for c in DContainer.objects.filter(rockon=rockon).order_by('launch_order'):
            run_command([DOCKER, 'start', c.name])
    except Exception as e:
        # `as e` keeps this valid on Python 3 (was the Python-2-only
        # `except Exception, e`).
        logger.error('Exception while starting the rockon(%s)' % rockon.name)
        logger.exception(e)
        new_status = 'start_failed'
Пример #27
0
def generic_stop(rockon):
    """Stop every container of the rock-on in reverse launch order; records
    'stop_failed' in new_status if any container fails to stop."""
    new_status = 'stopped'
    try:
        for c in DContainer.objects.filter(rockon=rockon).order_by('-launch_order'):
            run_command([DOCKER, 'stop', c.name])
    except Exception as e:
        # `as e` keeps this valid on Python 3 (was the Python-2-only
        # `except Exception, e`).
        logger.debug('exception while stopping the rockon(%s)' % rockon.name)
        logger.exception(e)
        new_status = 'stop_failed'
Пример #28
0
 def process_exception(self, request, exception):
     """Log the exception with request context and archive the logs; returns
     None so normal exception handling proceeds."""
     logger.error('Exception occured while processing a request. Path: %s '
                  'method: %s' % (request.path, request.method))
     logger.exception(exception)
     logs_tgz = settings.ROOT_DIR + 'src/rockstor/logs/error.tgz'
     run_command(['/usr/bin/tar', '-c', '-z', '-f', logs_tgz,
                  settings.ROOT_DIR + 'var/log'])
Пример #29
0
def add_share(pool_name, pool_device, share_name):
    """
    share is a subvolume in btrfs.
    Mounts the pool and creates the share as a subvolume beneath it.
    """
    device = '/dev/' + pool_device
    mnt = mount_root(pool_name, device)
    run_command([BTRFS, 'subvolume', 'create', mnt + '/' + share_name])
Пример #30
0
def mount_root(pool):
    """
    Mounts a given pool at the default mount root (usually /mnt2/) using the
    pool.name as the final path entry. Ie pool.name = test-pool will be mounted
    at /mnt2/test-pool. Any mount options held in pool.mnt_options will be added
    to the mount command via the -o option as will a compress=pool.compression
    entry.
    N.B. Initially the mount target is defined by /dev/disk/by-label/pool.name,
    if this fails then an attempt to mount by each member of
    /dev/disk/by-id/pool.disk_set.all() but only if there are any members.
    If this second method also fails then an exception is raised, currently all
    but the last failed mount by device name is logged. If no disk members were
    reported by pool.disk_set.count() a separate Exception is raised.
    :param pool: pool object
    :return: either the relevant mount point or an Exception which either
    indicates 'no disks in pool' or 'Unknown Reason'
    """
    root_pool_mnt = DEFAULT_MNT_DIR + pool.name
    if (is_share_mounted(pool.name)):
        return root_pool_mnt
    # Creates a directory to act as the mount point.
    create_tmp_dir(root_pool_mnt)
    mnt_device = '/dev/disk/by-label/%s' % pool.name
    mnt_cmd = [MOUNT, mnt_device, root_pool_mnt, ]
    mnt_options = ''
    if (pool.mnt_options is not None):
        mnt_options = pool.mnt_options
    if (pool.compression is not None):
        # only add compress= if the caller's options don't already set it.
        if (re.search('compress', mnt_options) is None):
            mnt_options = ('%s,compress=%s' % (mnt_options, pool.compression))
    if (os.path.exists(mnt_device)):
        if (len(mnt_options) > 0):
            mnt_cmd.extend(['-o', mnt_options])
        run_command(mnt_cmd)
        return root_pool_mnt
    # If we cannot mount by-label, let's try mounting by device; one by one
    # until we get our first success.
    if (pool.disk_set.count() < 1):
        raise Exception('Cannot mount Pool(%s) as it has no disks in it.' % pool.name)
    last_device = pool.disk_set.last()
    for device in pool.disk_set.all():
        mnt_device = ('/dev/disk/by-id/%s' % device.name)
        if (os.path.exists(mnt_device)):
            mnt_cmd = [MOUNT, mnt_device, root_pool_mnt, ]
            if (len(mnt_options) > 0):
                mnt_cmd.extend(['-o', mnt_options])
            try:
                run_command(mnt_cmd)
                return root_pool_mnt
            except Exception as e:
                # `as e` keeps this valid on Python 3 (was the Python-2-only
                # `except Exception, e`).
                if (device.name == last_device.name):
                    # exhausted mounting using all devices in the pool
                    raise e
                logger.error('Error mounting: %s. Will try using another device.' % mnt_cmd)
                logger.exception(e)
Пример #31
0
def owncloud_install(rockon):
    """docker-run every container of the owncloud rock-on in launch order.

    The postgres container gets a 700-mode data volume and POSTGRES_*
    credentials; the owncloud container gets Rockstor's TLS cert/key volumes
    and DB_* credentials. After launching postgres, waits (up to ~300s)
    until psql answers before returning.
    """
    for c in DContainer.objects.filter(rockon=rockon).order_by('launch_order'):
        rm_container(c.name)
        cmd = list(DCMD2) + [
            '--name',
            c.name,
        ]
        db_user = DCustomConfig.objects.get(rockon=rockon, key='db_user').val
        db_pw = DCustomConfig.objects.get(rockon=rockon, key='db_pw').val
        if (c.dimage.name == 'postgres'):
            # change permissions on the db volume to 700
            vo = DVolume.objects.get(container=c)
            share_mnt = ('%s%s' % (settings.MNT_PT, vo.share.name))
            run_command(['/usr/bin/chmod', '700', share_mnt])
            cmd.extend([
                '-e',
                'POSTGRES_USER=%s' % db_user, '-e',
                'POSTGRES_PASSWORD=%s' % db_pw
            ])
        cmd.extend(port_ops(c))
        for lo in DContainerLink.objects.filter(destination=c):
            cmd.extend(['--link', '%s:%s' % (lo.source.name, lo.name)])
        cmd.extend(vol_ops(c))
        if (c.name == 'owncloud'):
            # Mount Rockstor's cert/key into the container and enable https.
            cmd.extend([
                '-v',
                '%s/rockstor.key:/etc/ssl/private/owncloud.key' %
                settings.CERTDIR,  # noqa E501
                '-v',
                '%s/rockstor.cert:/etc/ssl/certs/owncloud.crt' %
                settings.CERTDIR,  # noqa E501
                '-e',
                'HTTPS_ENABLED=true'
            ])
            cmd.extend([
                '-e',
                'DB_USER=%s' % db_user,
                '-e',
                'DB_PASS=%s' % db_pw,
            ])
        cmd.append(c.dimage.name)
        logger.debug('docker cmd = %s' % cmd)
        run_command(cmd)
        if (c.dimage.name == 'postgres'):
            # make sure postgres is setup
            cur_wait = 0
            while (True):
                # Poll `psql -c \l` inside the container until it succeeds.
                o, e, rc = run_command([
                    DOCKER, 'exec', c.name, 'psql', '-U', 'postgres', '-c',
                    "\l"
                ],
                                       throw=False)
                if (rc == 0):
                    break
                if (cur_wait > 300):
                    logger.error('Waited too long(300 seconds) for '
                                 'postgres to initialize for owncloud. '
                                 'giving up.')
                    break
                time.sleep(1)
                cur_wait += 1
Пример #32
0
def pull_images(rockon):
    """Remove any stale container and pull a fresh image for each container
    of the rock-on."""
    for container in DContainer.objects.filter(rockon=rockon):
        rm_container(container.name)
        run_command([DOCKER, 'pull', container.dimage.name])
Пример #33
0
def discourse_install(rockon):
    """Install Discourse via discourse_docker's launcher.

    Clones the discourse_docker repo if needed, rewrites the sample
    standalone.yml with the rock-on's port, memory budget and custom config
    values, then runs `launcher bootstrap` and `launcher start`.
    """
    # 1. install git
    git = "/usr/bin/git"
    if not os.path.isfile(git):
        install_pkg("git")

    # 2. prep Discourse.yml
    repo = discourse_repo(rockon)
    if not os.path.isdir(repo):
        run_command([
            git, "clone", "https://github.com/discourse/discourse_docker.git",
            repo
        ])

    co = DContainer.objects.get(rockon=rockon)
    po = DPort.objects.get(container=co)
    cc_map = {}
    for cco in DCustomConfig.objects.filter(rockon=rockon):
        cc_map[cco.key] = cco.val
    # give discourse's db 25% of total RAM (in MB).
    mem = int((psutil.virtual_memory().total / (1024 * 1024)) * 0.25)

    fo, npath = mkstemp()
    src_yml = "%s/samples/standalone.yml" % repo
    dst_yml = "%s/containers/%s.yml" % (repo, rockon.name.lower())
    # Rewrite the sample yml line by line, substituting our settings.
    with open(src_yml) as sfo, open(npath, "w") as tfo:
        for line in sfo.readlines():
            if re.match('  - "80:80"', line) is not None:
                tfo.write('  - "%d:80"\n' % po.hostp)
            elif re.match("  #db_shared_buffers:", line) is not None:
                tfo.write('  db_shared_buffers: "%dMB"\n' % mem)
            elif re.match("  #UNICORN_WORKERS:", line) is not None:
                tfo.write("  UNICORN_WORKERS: 3\n")
            elif re.match("  DISCOURSE_DEVELOPER_EMAILS:", line) is not None:
                tfo.write("  DISCOURSE_DEVELOPER_EMAILS: '%s'\n" %
                          cc_map["admin-email"])
            elif re.match("  DISCOURSE_HOSTNAME:", line) is not None:
                tfo.write("  DISCOURSE_HOSTNAME: '%s'\n" % cc_map["hostname"])
            elif re.match("  DISCOURSE_SMTP_ADDRESS:", line) is not None:
                tfo.write("  DISCOURSE_SMTP_ADDRESS: %s\n" %
                          cc_map["smtp-address"])
            elif re.match("  #DISCOURSE_SMTP_PORT:", line) is not None:
                tfo.write("  DISCOURSE_SMTP_PORT: %s\n" % cc_map["smtp-port"])
            elif re.match("  #DISCOURSE_SMTP_USER_NAME:", line) is not None:
                tfo.write("  DISCOURSE_SMTP_USER_NAME: %s\n" %
                          cc_map["smtp-username"])
            # Reconstructed branch: the scraped source was garbled here
            # (condition and write call fused by a '******' redaction);
            # rebuilt to match the sibling SMTP branches above.
            elif re.match("  #DISCOURSE_SMTP_PASSWORD:", line) is not None:
                tfo.write("  DISCOURSE_SMTP_PASSWORD: %s\n" %
                          cc_map["smtp-password"])
            elif (re.match("      host: /var/discourse/shared/standalone",
                           line) is not None):  # noqa E501
                tfo.write("      host: %s/shares/standalone\n" % repo)
            elif (re.match(
                    "      host: /var/discourse/shared/standalone/log/var-log",
                    line) is not None):  # noqa E501
                tfo.write("      host: %s/shared/standalone/log/var-log\n" %
                          repo)
            else:
                tfo.write(line)
    move(npath, dst_yml)

    # 3. bootstrap: launcher bootstrap app
    run_command(["%s/launcher" % repo, "bootstrap", rockon.name.lower()])

    # 4. start: launcher start app
    run_command(["%s/launcher" % repo, "start", rockon.name.lower()])
Пример #34
0
def device_scan():
    """Ask the kernel to (re)scan for btrfs devices; returns (out, err, rc)."""
    cmd = [BTRFS, 'device', 'scan']
    return run_command(cmd)
Пример #35
0
 def _leave_domain(config):
     """Leave the AD domain named in config via 'realm leave'."""
     return run_command(['realm', 'leave', config.get('domain')])
Пример #36
0
def discourse_uninstall(rockon):
    """Destroy the Discourse app (if its repo exists) and delete the repo dir."""
    repo = discourse_repo(rockon)
    app = rockon.name.lower()
    if os.path.isdir(repo):
        # Tear down the running app before removing its files.
        run_command(["%s/launcher" % repo, "destroy", app])
    return run_command(["/usr/bin/rm", "-rf", repo])
Пример #37
0
def set_property(mnt_pt, name, val, mount=True):
    """Set a btrfs property name=val on mnt_pt.

    When mount is True the property is only set if mnt_pt is currently
    mounted; otherwise the call is a no-op (returns None).
    """
    if mount is True and not is_mounted(mnt_pt):
        return None
    return run_command([BTRFS, 'property', 'set', mnt_pt, name, val])
Пример #38
0
def qgroup_create(pool):
    """Create the next free qgroup under the QID namespace on pool.

    Mounts the pool first; returns the newly created qgroup id string.
    """
    mnt_pt = mount_root(pool)
    next_index = qgroup_max(mnt_pt) + 1
    qid = '%s/%d' % (QID, next_index)
    run_command([BTRFS, 'qgroup', 'create', qid, mnt_pt], log=True)
    return qid
Пример #39
0
    def post(self, request, command):
        """Handle Active Directory service commands: config, start, stop.

        config: validate settings, check name resolution and that the AD
        domain is discoverable via realm, then persist the config.
        start: check prerequisites (ntp, DNS), align the Samba workgroup
        with the AD domain's, persist config, join the domain and
        (re)start the dependent services (smb, nmb, winbind).
        stop: leave the domain and roll back the Samba/nss/service state.

        :param request: DRF request carrying the service config payload.
        :param command: one of "config", "start", "stop".
        :return: empty DRF Response on success; errors are routed through
            handle_exception / _handle_exception.
        """
        with self._handle_exception(request):
            # NOTE(review): method is hard-coded to sssd here, so the
            # winbind branches below appear to be dead code unless this
            # is changed — confirm intent.
            method = "sssd"
            service = Service.objects.get(name="active-directory")
            if command == "config":
                config = request.data.get("config")
                self._validate_config(config, request)

                # 1. Name resolution check
                self._resolve_check(config.get("domain"), request)

                # 2. realm discover check?
                domain = config.get("domain")
                try:
                    cmd = [REALM, "discover", "--name-only", domain]
                    o, e, rc = run_command(cmd)
                except Exception as e:
                    e_msg = ("Failed to discover the given({}) AD domain. "
                             "Error: {}".format(domain, e.__str__()))
                    handle_exception(Exception(e_msg), request)
                # Would be required only if method == "winbind":
                # validate_idmap_range(config)

                self._save_config(service, config)

            elif command == "start":
                config = self._config(service, request)
                domain = config.get("domain")
                # 1. make sure ntpd is running, or else, don't start.
                self._ntp_check(request)
                # 2. Name resolution check?
                self._resolve_check(config.get("domain"), request)

                if method == "winbind":
                    # Build up the authconfig invocation piecewise, one
                    # concern (nss, pam, smb, kerberos, winbind) at a time.
                    cmd = [
                        "/usr/sbin/authconfig",
                    ]
                    # nss
                    cmd += [
                        "--enablewinbind",
                        "--enablewins",
                    ]
                    # pam
                    cmd += [
                        "--enablewinbindauth",
                    ]
                    # smb
                    cmd += [
                        "--smbsecurity",
                        "ads",
                        "--smbrealm",
                        domain.upper(),
                    ]
                    # kerberos
                    cmd += [
                        "--krb5realm=%s" % domain.upper(),
                    ]
                    # winbind
                    cmd += [
                        "--enablewinbindoffline",
                        "--enablewinbindkrb5",
                        "--winbindtemplateshell=/bin/sh",
                    ]
                    # general
                    cmd += [
                        "--update",
                        "--enablelocauthorize",
                    ]
                    run_command(cmd)
                # 3. Get WORKGROUP from AD server
                config["workgroup"] = domain_workgroup(domain, method=method)

                # 4. Update Samba config
                smbo = Service.objects.get(name="smb")
                # smb_config = self._get_config(smbo)
                try:
                    smb_config = self._get_config(smbo)
                    if smb_config["workgroup"] != config["workgroup"]:
                        # the current Samba workgroup is different than what
                        # we need so stop here and alert the user
                        err_msg = (
                            "The AD domain workgroup differs from the workgroup "
                            "currently defined in the Samba configuration:\n"
                            "AD domain workgroup: {}\n"
                            "Samba workgroup: {}\n"
                            "Ensure the Samba workgroup matches the AD domain "
                            "workgroup and try again.".format(
                                config["workgroup"], smb_config["workgroup"]))
                        raise Exception(err_msg)
                except TypeError:
                    # Samba service is not configured, so let's do that now
                    smb_config = {}
                    smb_config["workgroup"] = config["workgroup"]
                    self._save_config(smbo, smb_config)
                # finally:
                #     # Set Samba WORKGROUP as AD REALM and save entry to Model
                update_global_config(smb_config, config)

                # 5. Save final Active_Directory service config and join AD
                self._save_config(service, config)
                join_domain(config, method=method)

                # SSSD config
                # Ensure ifp service is activated
                sssd_update_services(service="ifp")
                # Customize domain settings
                if (method == "sssd"
                        and (config.get("enumerate")
                             or config.get("case_sensitive")) is True):
                    sssd_update_ad(domain, config)

                # Update nsswitch.conf
                update_nss(["passwd", "group"], "sss")

                systemctl("smb", "restart")
                systemctl("nmb", "restart")
                # The winbind service is required only for id mapping while
                # accessing samba shares hosted by Rockstor
                systemctl("winbind", "enable")
                systemctl("winbind", "start")

            elif command == "stop":
                config = self._config(service, request)
                try:
                    # Leave the domain first, then undo the Samba/nss and
                    # service changes made by "start".
                    leave_domain(config, method=method)
                    smbo = Service.objects.get(name="smb")
                    smb_config = self._get_config(smbo)
                    update_global_config(smb_config)
                    systemctl("smb", "restart")
                    systemctl("nmb", "restart")
                    systemctl("winbind", "stop")
                    systemctl("winbind", "disable")
                    update_nss(["passwd", "group"], "sss", remove=True)
                except Exception as e:
                    e_msg = "Failed to leave AD domain({}). Error: {}".format(
                        config.get("domain"),
                        e.__str__(),
                    )
                    handle_exception(Exception(e_msg), request)

            return Response()
Пример #40
0
            break
        except requests.exceptions.ConnectionError, e:
            if (num_attempts > 15):
                print('Max attempts(15) reached. Connection errors persist. '
                      'Failed to bootstrap. Error: %s' % e.__str__())
                sys.exit(1)
            print(
                'Connection error while bootstrapping. This could be because '
                'rockstor.service is still starting up. will wait 2 seconds '
                'and try again.')
            time.sleep(2)
            num_attempts += 1
    print('Bootstrapping complete')

    try:
        print('Running qgroup cleanup. %s' % QGROUP_CLEAN)
        run_command([QGROUP_CLEAN])
    except Exception, e:
        print('Exception while running %s: %s' % (QGROUP_CLEAN, e.__str__()))

    try:
        print('Running qgroup limit maxout. %s' % QGROUP_MAXOUT_LIMIT)
        run_command([QGROUP_MAXOUT_LIMIT])
    except Exception, e:
        print('Exception while running %s: %s' %
              (QGROUP_MAXOUT_LIMIT, e.__str__()))


# Script entry point: only run main() when executed directly.
if __name__ == '__main__':
    main()
Пример #41
0
                if (method == 'winbind'):
                    cmd = ['/usr/sbin/authconfig', ]
                    #nss
                    cmd += ['--enablewinbind', '--enablewins',]
                    #pam
                    cmd += ['--enablewinbindauth',]
                    #smb
                    cmd += ['--smbsecurity', 'ads', '--smbrealm', domain.upper(),]
                    #kerberos
                    cmd += ['--krb5realm=%s' % domain.upper(),]
                    #winbind
                    cmd += ['--enablewinbindoffline', '--enablewinbindkrb5',
                            '--winbindtemplateshell=/bin/sh',]
                    #general
                    cmd += ['--update', '--enablelocauthorize',]
                    run_command(cmd)
                config['workgroup'] = self._domain_workgroup(domain, method=method)
                self._save_config(service, config)
                update_global_config(smb_config, config)
                self._join_domain(config, method=method)
                if (method == 'sssd' and config.get('enumerate') is True):
                    self._update_sssd(domain)

                if (method == 'winbind'):
                    systemctl('winbind', 'enable')
                    systemctl('winbind', 'start')
                systemctl('smb', 'restart')
                systemctl('nmb', 'restart')

            elif (command == 'stop'):
                config = self._config(service, request)
Пример #42
0
    def run(self):
        """Drive one replication receive session end to end.

        Connects a zmq DEALER socket to the local broker, ensures the
        destination share and replica metadata exist, rotates/prunes old
        replication snapshots, then pipes incoming stream messages into a
        long-running 'btrfs receive' subprocess until the sender signals
        completion (or a terminal error arrives / the broker goes silent).
        self.msg is updated before each risky step so the _clean_exit_handler
        context manager can report which phase failed.
        """
        logger.debug("Id: %s. Starting a new Receiver for meta: %s" %
                     (self.identity, self.meta))
        self.msg = "Top level exception in receiver"
        latest_snap = None
        with self._clean_exit_handler():
            self.law = APIWrapper()
            self.poll = zmq.Poller()
            self.dealer = self.ctx.socket(zmq.DEALER)
            self.dealer.setsockopt_string(zmq.IDENTITY, u"%s" % self.identity)
            # Low high-water mark: apply backpressure to the broker rather
            # than buffering a large stream in memory.
            self.dealer.set_hwm(10)
            self.dealer.connect("ipc://%s" %
                                settings.REPLICATION.get("ipc_socket"))
            self.poll.register(self.dealer, zmq.POLLIN)

            self.ack = True
            self.msg = "Failed to get the sender ip for appliance: %s" % self.sender_id
            self.sender_ip = Appliance.objects.get(uuid=self.sender_id).ip

            if not self.incremental:
                # Full send: create the destination share and its replica
                # metadata object from scratch.
                self.msg = "Failed to verify/create share: %s." % self.sname
                self.create_share(self.sname, self.dest_pool)

                self.msg = ("Failed to create the replica metadata object "
                            "for share: %s." % self.sname)
                data = {
                    "share": self.sname,
                    "appliance": self.sender_ip,
                    "src_share": self.src_share,
                }
                self.rid = self.create_rshare(data)
            else:
                self.msg = ("Failed to retreive the replica metadata "
                            "object for share: %s." % self.sname)
                rso = ReplicaShare.objects.get(share=self.sname)
                self.rid = rso.id
                # Find and send the current snapshot to the sender. This will
                # be used as the start by btrfs-send diff.
                self.msg = (
                    "Failed to verify latest replication snapshot on the system."
                )
                latest_snap = self._latest_snap(rso)

            self.msg = "Failed to create receive trail for rid: %d" % self.rid
            data = {
                "snap_name": self.snap_name,
            }
            self.rtid = self.create_receive_trail(self.rid, data)

            # delete the share, move the oldest snap to share
            self.msg = "Failed to promote the oldest Snapshot to Share."
            oldest_snap = get_oldest_snap(self.snap_dir,
                                          self.num_retain_snaps,
                                          regex="_replication_")
            if oldest_snap is not None:
                self.update_repclone(self.sname, oldest_snap)
                self.refresh_share_state()
                self.refresh_snapshot_state()

            self.msg = "Failed to prune old Snapshots"
            self._delete_old_snaps(self.sname, self.snap_dir,
                                   self.num_retain_snaps + 1)

            # TODO: The following should be re-instantiated once we have a
            # TODO: working method for doing so. see validate_src_share.
            # self.msg = ('Failed to validate the source share(%s) on '
            #             'sender(uuid: %s '
            #             ') Did the ip of the sender change?' %
            #             (self.src_share, self.sender_id))
            # self.validate_src_share(self.sender_id, self.src_share)

            sub_vol = "%s%s/%s" % (settings.MNT_PT, self.dest_pool, self.sname)
            if not is_subvol(sub_vol):
                self.msg = "Failed to create parent subvolume %s" % sub_vol
                run_command([BTRFS, "subvolume", "create", sub_vol])

            self.msg = "Failed to create snapshot directory: %s" % self.snap_dir
            run_command(["/usr/bin/mkdir", "-p", self.snap_dir])
            snap_fp = "%s/%s" % (self.snap_dir, self.snap_name)

            # If the snapshot already exists, presumably from the previous
            # attempt and the sender tries to send the same, reply back with
            # snap_exists and do not start the btrfs-receive
            if is_subvol(snap_fp):
                logger.debug("Id: %s. Snapshot to be sent(%s) already "
                             "exists. Not starting a new receive process" %
                             (self.identity, snap_fp))
                self._send_recv("snap-exists")
                self._sys_exit(0)

            cmd = [BTRFS, "receive", self.snap_dir]
            self.msg = ("Failed to start the low level btrfs receive "
                        "command(%s). Aborting." % cmd)
            self.rp = subprocess.Popen(
                cmd,
                shell=False,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

            self.msg = "Failed to send receiver-ready"
            rcommand, rmsg = self._send_recv("receiver-ready", latest_snap
                                             or "")
            if rcommand is None:
                logger.error("Id: %s. No response from the broker for "
                             "receiver-ready command. Aborting." %
                             self.identity)
                self._sys_exit(3)

            # Commands from the sender that terminate this session.
            term_commands = (
                "btrfs-send-init-error",
                "btrfs-send-unexpected-termination-error",
                "btrfs-send-nonzero-termination-error",
            )
            num_tries = 10
            poll_interval = 6000  # 6 seconds
            num_msgs = 0
            t0 = time.time()
            while True:
                socks = dict(self.poll.poll(poll_interval))
                if socks.get(self.dealer) == zmq.POLLIN:
                    # reset to wait upto 60(poll_interval x num_tries
                    # milliseconds) for every message
                    num_tries = 10
                    command, message = self.dealer.recv_multipart()
                    # NOTE(review): recv_multipart yields bytes on py3;
                    # these str comparisons assume py2 semantics — confirm.
                    if command == "btrfs-send-stream-finished":
                        # this command concludes fsdata transfer. After this,
                        # btrfs-recev process should be
                        # terminated(.communicate).
                        if self.rp.poll() is None:
                            self.msg = "Failed to terminate btrfs-recv command"
                            out, err = self.rp.communicate()
                            out = out.split("\n")
                            err = err.split("\n")
                            logger.debug("Id: %s. Terminated btrfs-recv. "
                                         "cmd = %s out = %s err: %s rc: %s" %
                                         (self.identity, cmd, out, err,
                                          self.rp.returncode))
                        if self.rp.returncode != 0:
                            self.msg = ("btrfs-recv exited with unexpected "
                                        "exitcode(%s). " % self.rp.returncode)
                            raise Exception(self.msg)
                        data = {
                            "status": "succeeded",
                            "kb_received": self.total_bytes_received / 1024,
                        }
                        self.msg = (
                            "Failed to update receive trail for rtid: %d" %
                            self.rtid)
                        self.update_receive_trail(self.rtid, data)

                        self._send_recv("btrfs-recv-finished")
                        self.refresh_share_state()
                        self.refresh_snapshot_state()

                        dsize, drate = self.size_report(
                            self.total_bytes_received, t0)
                        logger.debug("Id: %s. Receive complete. Total data "
                                     "transferred: %s. Rate: %s/sec." %
                                     (self.identity, dsize, drate))
                        self._sys_exit(0)

                    if command in term_commands:
                        self.msg = ("Terminal command(%s) received from the "
                                    "sender. Aborting." % command)
                        raise Exception(self.msg)

                    if self.rp.poll() is None:
                        # btrfs-recv is still alive: feed it this chunk and
                        # ask the broker for more.
                        self.rp.stdin.write(message)
                        self.rp.stdin.flush()
                        # @todo: implement advanced credit request system.
                        self.dealer.send_multipart([b"send-more", ""])
                        num_msgs += 1
                        self.total_bytes_received += len(message)
                        # Periodic progress update every 1000 messages.
                        if num_msgs == 1000:
                            num_msgs = 0
                            data = {
                                "status": "pending",
                                "kb_received":
                                self.total_bytes_received / 1024,
                            }
                            self.update_receive_trail(self.rtid, data)

                            dsize, drate = self.size_report(
                                self.total_bytes_received, t0)
                            logger.debug("Id: %s. Receiver alive. Data "
                                         "transferred: %s. Rate: %s/sec." %
                                         (self.identity, dsize, drate))
                    else:
                        # btrfs-recv died mid-stream: collect its output,
                        # record the failure on the trail and abort.
                        out, err = self.rp.communicate()
                        out = out.split("\n")
                        err = err.split("\n")
                        logger.error("Id: %s. btrfs-recv died unexpectedly. "
                                     "cmd: %s out: %s. err: %s" %
                                     (self.identity, cmd, out, err))
                        msg = (
                            "Low level system error from btrfs receive "
                            "command. cmd: %s out: %s err: %s for rtid: %s" %
                            (cmd, out, err, self.rtid))
                        data = {
                            "status": "failed",
                            "error": msg,
                        }
                        self.msg = ("Failed to update receive trail for "
                                    "rtid: %d." % self.rtid)
                        self.update_receive_trail(self.rtid, data)
                        self.msg = msg
                        raise Exception(self.msg)
                else:
                    # Broker silent for a poll interval; give up after 10.
                    num_tries -= 1
                    msg = ("No response received from the broker. "
                           "remaining tries: %d" % num_tries)
                    logger.error("Id: %s. %s" % (self.identity, msg))
                    if num_tries == 0:
                        self.msg = "%s. Terminating the receiver." % msg
                        raise Exception(self.msg)
Пример #43
0
def umount_root(root_pool_mnt):
    """Lazily unmount root_pool_mnt and remove the mount point directory.

    An already-unmounted path (umount rc=32 with a 'not mounted' message)
    is treated as success. After the lazy unmount, waits up to 40 seconds
    for the mount to disappear before falling back to a forced unmount.
    Fix: replaced Python-2-only 'except CommandException, ce:' syntax with
    the py2.6+/py3-compatible 'as' form.
    """
    if not os.path.exists(root_pool_mnt):
        return
    try:
        run_command([UMOUNT, '-l', root_pool_mnt])
    except CommandException as ce:
        # rc 32 is umount's generic mount-failure code; accept it only if
        # the error text shows the path simply wasn't mounted.
        if ce.rc == 32:
            for line in ce.err:
                if re.search('not mounted$', line.strip()) is not None:
                    return
            raise ce
        # NOTE(review): other non-zero rcs were silently swallowed in the
        # original; behavior preserved — confirm this is intentional.
    for _ in range(20):
        if not is_mounted(root_pool_mnt):
            run_command([RMDIR, root_pool_mnt])
            return
        time.sleep(2)
    run_command([UMOUNT, '-f', root_pool_mnt])
    run_command([RMDIR, root_pool_mnt])
    return


def is_subvol(mnt_pt):
    """
    Simple wrapper around "btrfs subvolume show mnt_pt"
    :param mnt_pt: mount point of subvolume to query
    :return: True if subvolume mnt_pt exists, else False
    """
    show_cmd = [BTRFS, 'subvolume', 'show', mnt_pt]
    # Throw=False on run_command to silence CommandExceptions.
Пример #44
0
def rm_container(name):
    """Best-effort stop and removal of the named docker container.

    Never raises (throw=False); logs the outcome of the rm attempt.
    """
    run_command([DOCKER, 'stop', name], throw=False)
    o, e, rc = run_command([DOCKER, 'rm', name], throw=False)
    return logger.debug('Attempted to remove a container(%s). out: %s '
                        'err: %s rc: %s.' % (name, o, e, rc))
Пример #45
0
def owncloud_install(rockon):
    """
    Custom config for the owncloud Rock-on install.
    Launches each container of the rockon in order: builds the docker run
    command (ports, links, volumes, credentials), then for the postgres
    container waits until the database accepts connections.
    :param rockon: Rock-on model instance being installed.
    :return: None
    """
    for c in DContainer.objects.filter(rockon=rockon).order_by("launch_order"):
        # Remove any stale container of the same name before recreating it.
        rm_container(c.name)
        cmd = list(DCMD2) + [
            "--name",
            c.name,
        ]
        db_user = DCustomConfig.objects.get(rockon=rockon, key="db_user").val
        db_pw = DCustomConfig.objects.get(rockon=rockon, key="db_pw").val
        if c.dimage.name == "postgres":
            # change permissions on the db volume to 700
            vo = DVolume.objects.get(container=c)
            share_mnt = "%s%s" % (settings.MNT_PT, vo.share.name)
            run_command(["/usr/bin/chmod", "700", share_mnt])
            cmd.extend(
                [
                    "-e",
                    "POSTGRES_USER=%s" % db_user,
                    "-e",
                    "POSTGRES_PASSWORD=%s" % db_pw,
                ]
            )
        cmd.extend(port_ops(c))
        # Wire up inter-container links declared for this container.
        for lo in DContainerLink.objects.filter(destination=c):
            cmd.extend(["--link", "%s:%s" % (lo.source.name, lo.name)])
        cmd.extend(vol_ops(c))
        if c.name == "owncloud":
            # Mount the Rockstor TLS key/cert into the container and turn
            # on HTTPS.
            cmd.extend(
                [
                    "-v",
                    "%s/rockstor.key:/etc/ssl/private/owncloud.key"
                    % settings.CERTDIR,  # noqa E501
                    "-v",
                    "%s/rockstor.cert:/etc/ssl/certs/owncloud.crt"
                    % settings.CERTDIR,  # noqa E501
                    "-e",
                    "HTTPS_ENABLED=true",
                ]
            )
            cmd.extend(
                ["-e", "DB_USER=%s" % db_user, "-e", "DB_PASS=%s" % db_pw,]
            )
        image_name_plus_tag = c.dimage.name + ":" + c.dimage.tag
        cmd.append(image_name_plus_tag)
        logger.debug("Docker cmd = ({}).".format(cmd))
        run_command(cmd)
        if c.dimage.name == "postgres":
            # make sure postgres is setup: poll with psql (1s interval,
            # up to ~300s) until the server answers.
            cur_wait = 0
            while True:
                o, e, rc = run_command(
                    [DOCKER, "exec", c.name, "psql", "-U", "postgres", "-c", "\l"],
                    throw=False,
                )
                if rc == 0:
                    break
                if cur_wait > 300:
                    logger.error(
                        "Waited too long (300 seconds) for "
                        "postgres to initialize for owncloud. "
                        "giving up."
                    )
                    break
                time.sleep(1)
                cur_wait += 1
Пример #46
0
def snapshot_list(mnt_pt):
    """Return the names (last output field) of all snapshots under mnt_pt."""
    o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', mnt_pt])
    return [line.split()[-1] for line in o]
Пример #47
0
def discourse_start(rockon):
    """Start the Discourse app via its repo's launcher script."""
    repo = discourse_repo(rockon)
    cmd = ["%s/launcher" % repo, "start", rockon.name.lower()]
    return run_command(cmd)
Пример #48
0
def discourse_install(rockon):
    """Install the Discourse Rock-on via discourse_docker's launcher.

    Steps: ensure git is present, clone discourse_docker, render the app
    yml from the sample (substituting port, memory and SMTP/admin values
    from the rockon's custom config), then bootstrap and start the app.
    Fix: restored the SMTP password branch that was corrupted in the
    source ('******' masking broke the syntax) by mirroring the adjacent
    SMTP_PORT/SMTP_USER_NAME branches; also close the mkstemp fd, which
    was previously leaked.
    :param rockon: Rock-on model instance being installed.
    """
    # 1. install git
    git = '/usr/bin/git'
    if not os.path.isfile(git):
        install_pkg('git')

    # 2. prep Discourse.yml
    repo = discourse_repo(rockon)
    if not os.path.isdir(repo):
        run_command([
            git, 'clone', 'https://github.com/discourse/discourse_docker.git',
            repo
        ])

    co = DContainer.objects.get(rockon=rockon)
    po = DPort.objects.get(container=co)
    cc_map = {}
    for cco in DCustomConfig.objects.filter(rockon=rockon):
        cc_map[cco.key] = cco.val
    # Give Discourse's db 25% of total RAM (in MB).
    mem = int((psutil.virtual_memory().total / (1024 * 1024)) * .25)

    fo, npath = mkstemp()
    os.close(fo)  # mkstemp returns an open fd; we reopen by path below.
    src_yml = '%s/samples/standalone.yml' % repo
    dst_yml = '%s/containers/%s.yml' % (repo, rockon.name.lower())
    with open(src_yml) as sfo, open(npath, 'w') as tfo:
        # Rewrite the sample yml line by line, substituting our values.
        for line in sfo.readlines():
            if (re.match('  - "80:80"', line) is not None):
                tfo.write('  - "%d:80"\n' % po.hostp)
            elif (re.match('  #db_shared_buffers:', line) is not None):
                tfo.write('  db_shared_buffers: "%dMB"\n' % mem)
            elif (re.match('  #UNICORN_WORKERS:', line) is not None):
                tfo.write('  UNICORN_WORKERS: 3\n')
            elif (re.match('  DISCOURSE_DEVELOPER_EMAILS:', line) is not None):
                tfo.write("  DISCOURSE_DEVELOPER_EMAILS: '%s'\n" %
                          cc_map['admin-email'])
            elif (re.match('  DISCOURSE_HOSTNAME:', line) is not None):
                tfo.write("  DISCOURSE_HOSTNAME: '%s'\n" % cc_map['hostname'])
            elif (re.match('  DISCOURSE_SMTP_ADDRESS:', line) is not None):
                tfo.write('  DISCOURSE_SMTP_ADDRESS: %s\n' %
                          cc_map['smtp-address'])
            elif (re.match('  #DISCOURSE_SMTP_PORT:', line) is not None):
                tfo.write('  DISCOURSE_SMTP_PORT: %s\n' % cc_map['smtp-port'])
            elif (re.match('  #DISCOURSE_SMTP_USER_NAME:', line) is not None):
                tfo.write('  DISCOURSE_SMTP_USER_NAME: %s\n' %
                          cc_map['smtp-username'])
            elif (re.match('  #DISCOURSE_SMTP_PASSWORD:', line) is not None):
                tfo.write('  DISCOURSE_SMTP_PASSWORD: %s\n' %
                          cc_map['smtp-password'])
            elif (re.match('      host: /var/discourse/shared/standalone',
                           line) is not None):
                # NOTE(review): writes '.../shares/...' here but
                # '.../shared/...' below — possible typo; confirm upstream.
                tfo.write('      host: %s/shares/standalone\n' % repo)
            elif (re.match(
                    '      host: /var/discourse/shared/standalone/log/var-log',
                    line) is not None):
                tfo.write('      host: %s/shared/standalone/log/var-log\n' %
                          repo)
            else:
                tfo.write(line)
    move(npath, dst_yml)

    # 3. bootstrap: launcher bootstrap app
    run_command(['%s/launcher' % repo, 'bootstrap', rockon.name.lower()])

    # 4. start: launcher start app
    run_command(['%s/launcher' % repo, 'start', rockon.name.lower()])
Пример #49
0
def set_def_kernel(logger, version=settings.SUPPORTED_KERNEL_VERSION):
    """Ensure the supported kernel is grub's default, via grubby.

    Logs (through the supplied logger) and returns on every outcome
    rather than raising.
    Fixes: replaced Python-2-only 'except Exception, e:' syntax with the
    'as' form, and the stray 'logging.info' (root logger) call with the
    passed-in logger, matching the rest of the function.
    :param logger: logger instance used for all output.
    :param version: kernel version string; defaults to the supported one.
    """
    supported_kernel_path = '/boot/vmlinuz-%s' % version
    if not os.path.isfile(supported_kernel_path):
        return logger.error('Supported kernel(%s) does not exist' %
                            supported_kernel_path)
    try:
        o, e, rc = run_command([GRUBBY, '--default-kernel'])
        if o[0] == supported_kernel_path:
            return logger.info('Supported kernel(%s) is already the default' %
                               supported_kernel_path)
    except Exception as e:
        return logger.error('Exception while listing the default kernel: %s' %
                            e.__str__())

    try:
        run_command([GRUBBY, '--set-default=%s' % supported_kernel_path])
        return logger.info('Default kernel set to %s' % supported_kernel_path)
    except Exception as e:
        return logger.error(
            'Exception while setting kernel(%s) as default: %s' %
            (version, e.__str__()))


def update_tz(logging):
    # update timezone variable in settings.py
    zonestr = os.path.realpath('/etc/localtime').split('zoneinfo/')[1]
    logging.info('system timezone = %s' % zonestr)
    sfile = '%s/src/rockstor/settings.py' % BASE_DIR
    fo, npath = mkstemp()
    updated = False
    with open(sfile) as sfo, open(npath, 'w') as tfo:
Пример #50
0
def discourse_start(rockon):
    """Start the Discourse app using its repo's launcher script."""
    repo = discourse_repo(rockon)
    cmd = ['%s/launcher' % repo, 'start', rockon.name.lower()]
    return run_command(cmd)
Пример #51
0
def btrfs_uuid(disk):
    """Return the uuid of the btrfs filesystem on the given by-id disk name."""
    dev = '/dev/disk/by-id/%s' % disk
    o, e, rc = run_command([BTRFS, 'filesystem', 'show', dev])
    # The uuid is the 4th whitespace-separated field of the first line.
    return o[0].split()[3]
Пример #52
0
def discourse_uninstall(rockon):
    """Destroy the Discourse app container, then delete its repo directory."""
    repo = discourse_repo(rockon)
    app_name = rockon.name.lower()
    if os.path.isdir(repo):
        # Stop/remove the app before deleting its files.
        run_command(['%s/launcher' % repo, 'destroy', app_name])
    return run_command(['/usr/bin/rm', '-rf', repo])
Пример #53
0
def switch_quota(pool, flag='enable'):
    """Enable or disable btrfs quota on pool (flag: 'enable' or 'disable')."""
    mnt_pt = mount_root(pool)
    return run_command([BTRFS, 'quota', flag, mnt_pt])
Пример #54
0
def establish_keyfile(dev_byid, keyfile_withpath, passphrase):
    """
    Ensures that the given keyfile_withpath exists and calls create_keyfile()
    if it doesn't. Then attempts to register the established keyfile with the
    dev_byid device via "cryptsetup luksAddKey dev keyfile passphrase". But
    only if the passphrase is found to not equal '', flag for skip luksAddKey.
    N.B. The passphrase is passed to the command via a secure temporary file.
    Care is taken to remove this file irrespective of outcome.
    An existing keyfile will not be altered or deleted but a freshly created
    keyfile will be removed if our 'cryptsetup luksAddKey' returns non zero.
    :param dev_byid: by-id type name without path as found in db Disks.name.
    :param keyfile_withpath: the intended keyfile with full path.
    :param passphrase: LUKS passphrase: any current key slot passphrase. If
    an empty passphrase is passed then 'cryptsetup luksAddKey' is skipped.
    :return: True if keyfile successfully registered. False or an Exception
    is raised in all other instances.
    """
    fresh_keyfile = False  # Until we find otherwise.
    # First we establish if our keyfile exists, and if not we create it.
    if not os.path.isfile(keyfile_withpath):
        # attempt to create our keyfile:
        if not create_keyfile(keyfile_withpath):
            return False
        fresh_keyfile = True
    # We are by now assured of an existing keyfile_withpath.
    # Only register this keyfile with our LUKS container if needed:
    if passphrase == '':
        # If an empty passphrase was passed then we interpret this as a flag
        # to indicate no requirement to 'cryptsetup luksAddKey' so we are now
        # done. Use case is the return to "auto unlock via keyfile" when that
        # keyfile has already been registered. UI will not ask for passphrase
        # as it is assumed that an existing keyfile is already registered.
        return True
    dev_byid_withpath = get_device_path(dev_byid)
    tfo, npath = mkstemp()
    # Close the raw fd returned by mkstemp right away: we re-open npath via
    # open() below, so keeping tfo open would leak a file descriptor.
    os.close(tfo)
    # Pythons _candidate_tempdir_list() should ensure our npath temp file is
    # in memory (tmpfs). From https://docs.python.org/2/library/tempfile.html
    # we have "Creates a temporary file in the most secure manner possible."
    # Populate this file with our passphrase and use as cryptsetup keyfile.
    # We set rc in case our try fails earlier than our run_command.
    rc = 0
    cmd = [
        CRYPTSETUP, 'luksAddKey', dev_byid_withpath, keyfile_withpath,
        '--key-file', npath
    ]
    try:
        # The 'with' context closes the passphrase file before run_command.
        with open(npath, 'w') as passphrase_file_object:
            passphrase_file_object.write(passphrase)
        out, err, rc = run_command(cmd, throw=False)
        if rc != 0:  # our luksAddKey command failed.
            if fresh_keyfile:
                # a freshly created keyfile without successful luksAddKey is
                # meaningless so remove it.
                os.remove(keyfile_withpath)
            raise CommandException(('%s' % cmd), out, err, rc)
    except Exception as e:
        # Map cryptsetup's documented return codes to human readable text.
        if rc == 1:
            msg = 'Wrong Parameters exception'
        elif rc == 2:
            msg = 'No Permission (Bad Passphrase) exception'
        elif rc == 3:
            msg = 'Out of Memory exception'
        elif rc == 4:
            msg = 'Wrong Device Specified exception'
        elif rc == 5:
            msg = "Device already exists or device is busy exception"
        else:
            msg = 'Exception'
        msg += ' while running command(%s): %s' % (cmd, e.__str__())
        raise Exception(msg)
    finally:
        # Always remove the passphrase temp file, irrespective of outcome.
        if os.path.exists(npath):
            try:
                os.remove(npath)
            except Exception as e:
                msg = ('Exception while removing temp file %s: %s' %
                       (npath, e.__str__()))
                raise Exception(msg)
    return True
Пример #55
0
def main():
    """
    One-shot post-install/boot initialization for Rockstor.

    Pass '-x' as the first CLI argument for debug-level logging. Steps, in
    order: pin the supported kernel, prune old kernels (best effort),
    (re)generate the self-signed SSL cert pair if missing, run flash
    optimizations, sync timezone and sshd config, perform first-run database
    setup and migrations (guarded by the STAMP file), disable firewalld,
    refresh nginx config and /etc/issue, and enable the rockstor services.
    """
    loglevel = logging.INFO
    if (len(sys.argv) > 1 and sys.argv[1] == '-x'):
        loglevel = logging.DEBUG
    logging.basicConfig(format='%(asctime)s: %(message)s', level=loglevel)
    set_def_kernel(logging)
    try:
        delete_old_kernels(logging)
    except Exception as e:
        logging.debug('Exception while deleting old kernels. Soft error. '
                      'Moving on.')
        logging.exception(e)

    cert_loc = '%s/certs/' % BASE_DIR
    # If either half of the cert/key pair is missing, wipe the directory so
    # both get regenerated together below.
    if (os.path.isdir(cert_loc)):
        if (not os.path.isfile('%s/rockstor.cert' % cert_loc)
                or not os.path.isfile('%s/rockstor.key' % cert_loc)):
            shutil.rmtree(cert_loc)

    if (not os.path.isdir(cert_loc)):
        os.mkdir(cert_loc)
        dn = ("/C=US/ST=Rockstor user's state/L=Rockstor user's "
              "city/O=Rockstor user/OU=Rockstor dept/CN=rockstor.user")
        logging.info('Creating openssl cert...')
        run_command([
            OPENSSL, 'req', '-nodes', '-newkey', 'rsa:2048', '-keyout',
            '%s/first.key' % cert_loc, '-out',
            '%s/rockstor.csr' % cert_loc, '-subj', dn
        ])
        logging.debug('openssl cert created')
        logging.info('Creating rockstor key...')
        run_command([
            OPENSSL, 'rsa', '-in',
            '%s/first.key' % cert_loc, '-out',
            '%s/rockstor.key' % cert_loc
        ])
        logging.debug('rockstor key created')
        # Typo fix: was 'Singing cert...'.
        logging.info('Signing cert with rockstor key...')
        run_command([
            OPENSSL, 'x509', '-in',
            '%s/rockstor.csr' % cert_loc, '-out',
            '%s/rockstor.cert' % cert_loc, '-req', '-signkey',
            '%s/rockstor.key' % cert_loc, '-days', '3650'
        ])
        logging.debug('cert signed.')
        logging.info('restarting nginx...')
        run_command([SUPERCTL, 'restart', 'nginx'])

    cleanup_rclocal(logging)
    logging.info('Checking for flash and Running flash optimizations if '
                 'appropriate.')
    run_command([FLASH_OPTIMIZE, '-x'], throw=False)
    try:
        logging.info('Updating the timezone from the system')
        update_tz(logging)
    except Exception as e:
        logging.error('Exception while updating timezone: %s' % e.__str__())
        logging.exception(e)

    try:
        logging.info('Updating sshd_config')
        bootstrap_sshd_config(logging)
    except Exception as e:
        logging.error('Exception while updating sshd_config: %s' % e.__str__())

    # STAMP marks first-run database setup as already done; skip it if found.
    if (not os.path.isfile(STAMP)):
        logging.info('Please be patient. This script could take a few minutes')
        shutil.copyfile('%s/conf/django-hack' % BASE_DIR,
                        '%s/django' % BASE_BIN)
        run_command([SYSCTL, 'enable', 'postgresql'])
        # Typo fix: was 'Progresql enabled'.
        logging.debug('Postgresql enabled')
        pg_data = '/var/lib/pgsql/data'
        # Start from a clean data dir so initdb below cannot fail on residue.
        if (os.path.isdir(pg_data)):
            shutil.rmtree(pg_data)
        logging.info('initializing Postgresql...')
        run_command(['/usr/bin/postgresql-setup', 'initdb'])
        logging.info('Done.')
        run_command([SYSCTL, 'restart', 'postgresql'])
        run_command([SYSCTL, 'status', 'postgresql'])
        logging.debug('Postgresql restarted')
        logging.info('Creating app databases...')
        run_command(['su', '-', 'postgres', '-c', '/usr/bin/createdb smartdb'])
        logging.debug('smartdb created')
        run_command(
            ['su', '-', 'postgres', '-c', '/usr/bin/createdb storageadmin'])
        logging.debug('storageadmin created')
        logging.info('Done')
        logging.info('Initializing app databases...')
        run_command([
            'su', '-', 'postgres', '-c',
            "psql -c \"CREATE ROLE rocky WITH SUPERUSER LOGIN PASSWORD 'rocky'\""
        ])  # noqa E501
        logging.debug('rocky ROLE created')
        run_command([
            'su', '-', 'postgres', '-c',
            "psql storageadmin -f %s/conf/storageadmin.sql.in" % BASE_DIR
        ])  # noqa E501
        logging.debug('storageadmin app database loaded')
        run_command([
            'su', '-', 'postgres', '-c',
            "psql smartdb -f %s/conf/smartdb.sql.in" % BASE_DIR
        ])
        logging.debug('smartdb app database loaded')
        logging.info('Done')
        run_command([
            'cp', '-f',
            '%s/conf/postgresql.conf' % BASE_DIR, '/var/lib/pgsql/data/'
        ])
        logging.debug('postgresql.conf copied')
        run_command([
            'cp', '-f',
            '%s/conf/pg_hba.conf' % BASE_DIR, '/var/lib/pgsql/data/'
        ])
        logging.debug('pg_hba.conf copied')
        run_command([SYSCTL, 'restart', 'postgresql'])
        logging.info('Postgresql restarted')
        logging.info('Running app database migrations...')
        migration_cmd = [
            DJANGO,
            'migrate',
            '--noinput',
        ]
        # The initial migrations are --fake'd as the schema was just loaded
        # from the sql.in dumps above.
        fake_migration_cmd = migration_cmd + ['--fake']
        smartdb_opts = ['--database=smart_manager', 'smart_manager']
        run_command(fake_migration_cmd + ['storageadmin', '0001_initial'])
        run_command(fake_migration_cmd + smartdb_opts + ['0001_initial'])
        run_command(migration_cmd + ['storageadmin'])
        run_command(migration_cmd + smartdb_opts)
        run_command(migration_cmd + ['auth'])
        run_command(migration_cmd + ['django_ztask'])
        logging.info('Done')
        logging.info('Running prepdb...')
        run_command([
            PREP_DB,
        ])
        logging.info('Done')
        run_command(['touch', STAMP])
        require_postgres(logging)
        logging.info('Done')
    else:
        logging.info('Running prepdb...')
        run_command([
            PREP_DB,
        ])

    logging.info('stopping firewalld...')
    run_command([SYSCTL, 'stop', 'firewalld'])
    run_command([SYSCTL, 'disable', 'firewalld'])
    logging.info('firewalld stopped and disabled')
    update_nginx(logging)

    shutil.copyfile('/etc/issue', '/etc/issue.rockstor')
    # Retry up to 30 times (2s apart) for the default interface IP to appear;
    # it is needed by /etc/issue for remote administration instructions.
    for i in range(30):
        try:
            if (init_update_issue() is not None):
                # init_update_issue() didn't cause an exception and did return
                # an ip so we break out of the multi try loop as we are done.
                break
            else:
                # execute except block with message so we can try again.
                raise Exception('default interface IP not yet configured')
        except Exception as e:
            # only executed if there is an actual exception with
            # init_update_issue() or if it returns None so we can try again
            # regardless as in both instances we may succeed on another try.
            logging.debug('Exception occurred while running update_issue: %s. '
                          'Trying again after 2 seconds.' % e.__str__())
            if (i > 28):
                logging.error('Failed to retrieve default interface IP address'
                              ' necessary for remote administration. '
                              'Quitting.')
                raise e
            time.sleep(2)

    enable_rockstor_service(logging)
    enable_bootstrap_service(logging)
Пример #56
0
    def run(self):
        """
        Receiver loop for one replication transfer (legacy zmq SUB/PUSH
        protocol).

        Resolves the sender's ip from the uuid in self.meta, subscribes to
        the sender's data_port for the raw btrfs send stream and pushes
        control acks back on meta_port. On a full (non incremental) transfer
        the destination share and its replica metadata are created first.
        The stream is piped into a 'btrfs receive' subprocess; flow control
        is credit based ('send_more' acks). On END_SUCCESS the oldest
        received snapshot is promoted to the destination share; on END_FAIL
        the receive subprocess is terminated and the trail marked failed.
        """
        msg = ('Failed to get the sender ip from the uuid(%s) for meta: %s' %
               (self.meta['uuid'], self.meta))
        with self._clean_exit_handler(msg):
            self.sender_ip = get_sender_ip(self.meta['uuid'], logger)

        msg = ('Failed to connect to the sender(%s) on data_port(%s). meta: '
               '%s. Aborting.' % (self.sender_ip, self.data_port, self.meta))
        with self._clean_exit_handler(msg):
            #@todo: add validation
            recv_sub = self.ctx.socket(zmq.SUB)
            recv_sub.connect('tcp://%s:%d' % (self.sender_ip, self.data_port))
            # 100ms receive timeout; timeouts surface as zmq.error.Again below.
            recv_sub.RCVTIMEO = 100
            # Only receive frames prefixed with this transfer's id.
            recv_sub.setsockopt(zmq.SUBSCRIBE, str(self.meta['id']))

        msg = ('Failed to connect to the sender(%s) on '
               'meta_port(%d). meta: %s. Aborting.' %
               (self.sender_ip, self.meta_port, self.meta))
        with self._clean_exit_handler(msg):
            self.meta_push = self.ctx.socket(zmq.PUSH)
            self.meta_push.connect('tcp://%s:%d' % (self.sender_ip,
                                                    self.meta_port))

        # Destination share name: <sender appliance id>_<source share name>.
        sname = ('%s_%s' % (self.sender_id, self.src_share))
        if (not self.incremental):
            msg = ('Failed to verify/create share: %s. meta: %s. '
                   'Aborting.' % (sname, self.meta))
            with self._clean_exit_handler(msg, ack=True):
                create_share(sname, self.dest_pool, logger)

            msg = ('Failed to create the replica metadata object '
                   'for share: %s. meta: %s. Aborting.' %
                   (sname, self.meta))
            with self._clean_exit_handler(msg, ack=True):
                data = {'share': sname,
                        'appliance': self.sender_ip,
                        'src_share': self.src_share,
                        'data_port': self.data_port,
                        'meta_port': self.meta_port, }
                self.rid = create_rshare(data, logger)

        else:
            msg = ('Failed to retreive the replica metadata object for '
                   'share: %s. meta: %s. Aborting.' % (sname, self.meta))
            with self._clean_exit_handler(msg):
                self.rid = rshare_id(sname, logger)

        # Parent subvolume that will hold all received snapshots.
        sub_vol = ('%s%s/.snapshots/%s' % (settings.MNT_PT, self.meta['pool'],
                                           sname))
        if (not is_subvol(sub_vol)):
            msg = ('Failed to create parent subvolume %s' % sub_vol)
            with self._clean_exit_handler(msg, ack=True):
                run_command([BTRFS, 'subvolume', 'create', sub_vol])

        snap_fp = ('%s/%s' % (sub_vol, self.snap_name))
        # NOTE(review): msg here is stale -- it still holds the previous
        # block's failure message, so an error below reports misleadingly.
        with self._clean_exit_handler(msg):
            if (is_subvol(snap_fp)):
                # Snapshot left over from a previous attempt; tell the sender.
                ack = {'msg': 'snap_exists',
                       'id': self.meta['id'], }
                self.meta_push.send_json(ack)

        cmd = [BTRFS, 'receive', sub_vol]
        msg = ('Failed to start the low level btrfs receive command(%s)'
               '. Aborting.' % (cmd))
        with self._clean_exit_handler(msg, ack=True):
            rp = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)

        msg = ('Failed to send begin_ok to the sender for meta: %s' %
               self.meta)
        with self._clean_exit_handler(msg):
            ack = {'msg': 'begin_ok',
                   'id': self.meta['id'], }
            self.meta_push.send_json(ack)
        recv_timeout_counter = 0
        # Credit based flow control: ask for DEFAULT_SEND_CREDIT more chunks
        # whenever the outstanding credit drops below 5.
        credit = settings.DEFAULT_SEND_CREDIT
        check_credit = True
        while True:
            if (check_credit is True and credit < 5):
                ack = {'msg': 'send_more',
                       'id': self.meta['id'],
                       'credit': settings.DEFAULT_SEND_CREDIT, }
                self.meta_push.send_json(ack)
                credit = credit + settings.DEFAULT_SEND_CREDIT
                logger.debug('%d KB received for %s' %
                             (int(self.kb_received / 1024), sname))

            try:
                recv_data = recv_sub.recv()
                # Strip the subscription topic (transfer id) prefix.
                recv_data = recv_data[len(self.meta['id']):]
                credit = credit - 1
                recv_timeout_counter = 0
                self.kb_received = self.kb_received + len(recv_data)
                if (self.rtid is None):
                    msg = ('Failed to create snapshot: %s. Aborting.' %
                           self.snap_name)
                    # create a snapshot only if it's not already from a previous failed attempt
                    with self._clean_exit_handler(msg, ack=True):
                        create_snapshot(sname, self.snap_name, logger,
                                        snap_type='receiver')

                    data = {'snap_name': self.snap_name}
                    msg = ('Failed to create receive trail for rid: %d'
                           '. meta: %s' % (self.rid, self.meta))
                    with self._clean_exit_handler(msg, ack=True):
                        self.rtid = create_receive_trail(self.rid, data,
                                                         logger)

                # In-band terminators sent by the sender conclude the stream.
                if (recv_data == 'END_SUCCESS' or recv_data == 'END_FAIL'):
                    check_credit = False
                    ts = datetime.utcnow().replace(tzinfo=utc)
                    data = {'kb_received': self.kb_received / 1024, }
                    if (recv_data == 'END_SUCCESS'):
                        data['receive_succeeded'] = ts
                        #delete the share, move the oldest snap to share
                        oldest_snap = get_oldest_snap(sub_vol, 3)
                        if (oldest_snap is not None):
                            snap_path = ('%s/%s' % (sub_vol, oldest_snap))
                            share_path = ('%s%s/%s' %
                                          (settings.MNT_PT, self.dest_pool,
                                           sname))
                            msg = ('Failed to promote the oldest Snapshot(%s) '
                                   'to Share(%s)' % (snap_path, share_path))
                            try:
                                pool = Pool.objects.get(name=self.dest_pool)
                                pool_device = Disk.objects.filter(
                                    pool=pool)[0].name
                                remove_share(pool, pool_device, sname)
                                set_property(snap_path, 'ro', 'false',
                                             mount=False)
                                run_command(['/usr/bin/rm', '-rf', share_path],
                                            throw=False)
                                shutil.move(snap_path, share_path)
                                set_property(share_path, 'ro', 'true',
                                             mount=False)
                                delete_snapshot(sname, oldest_snap, logger)
                            # NOTE(review): py2-only except syntax; 'e' is
                            # unused -- both calls log msg, not the exception.
                            except Exception, e:
                                logger.error(msg)
                                logger.exception(msg)
                    else:
                        logger.error('END_FAIL received for meta: %s. '
                                     'Terminating.' % self.meta)
                        rp.terminate()
                        data['receive_failed'] = ts
                        data['status'] = 'failed'

                    msg = ('Failed to update receive trail for rtid: %d'
                           '. meta: %s' % (self.rtid, self.meta))
                    with self._clean_exit_handler(msg, ack=True):
                        update_receive_trail(self.rtid, data, logger)
                    break
                # Feed the chunk to btrfs receive if it is still alive.
                if (rp.poll() is None):
                    rp.stdin.write(recv_data)
                    rp.stdin.flush()
                else:
                    logger.error('It seems the btrfs receive process died'
                                 ' unexpectedly.')
                    out, err = rp.communicate()
                    msg = ('Low level system error from btrfs receive '
                           'command. out: %s err: %s for rtid: %s meta: %s'
                           % (out, err, self.rtid, self.meta))
                    with self._clean_exit_handler(msg, ack=True):
                        ts = datetime.utcnow().replace(tzinfo=utc)
                        data = {'receive_failed': ts,
                                'status': 'failed',
                                'error': msg, }
                        update_receive_trail(self.rtid, data, logger)
            except zmq.error.Again:
                # RCVTIMEO is 100ms, so 600 consecutive misses ~= 60 seconds.
                recv_timeout_counter = recv_timeout_counter + 1
                if (recv_timeout_counter > 600):
                    logger.error('Nothing received in the last 60 seconds '
                                 'from the sender for meta: %s. Aborting.'
                                 % self.meta)
                    self._sys_exit(3)
Пример #57
0
    def run(self):
        """
        Receiver loop for one replication transfer (dealer/router zmq
        protocol).

        Connects a DEALER socket to the local replication broker via ipc,
        creates (full transfer) or looks up (incremental) the replica
        metadata, promotes/prunes old replication snapshots, then pipes the
        incoming btrfs send stream into a 'btrfs receive' subprocess. Each
        received chunk is acked with 'send-more'. 'btrfs-send-stream-finished'
        concludes the transfer; terminal error commands from the sender, a
        dead btrfs-recv process, or broker silence abort it. All failures
        route through _clean_exit_handler using the most recent self.msg.
        """
        logger.debug('Id: %s. Starting a new Receiver for meta: %s' %
                     (self.identity, self.meta))
        self.msg = ('Top level exception in receiver')
        latest_snap = None
        with self._clean_exit_handler():
            self.law = APIWrapper()
            self.poll = zmq.Poller()
            self.dealer = self.ctx.socket(zmq.DEALER)
            self.dealer.setsockopt_string(zmq.IDENTITY, u'%s' % self.identity)
            # Cap outstanding messages to provide back pressure.
            self.dealer.set_hwm(10)
            self.dealer.connect('ipc://%s' %
                                settings.REPLICATION.get('ipc_socket'))
            self.poll.register(self.dealer, zmq.POLLIN)

            self.ack = True
            self.msg = ('Failed to get the sender ip for appliance: %s' %
                        self.sender_id)
            self.sender_ip = Appliance.objects.get(uuid=self.sender_id).ip

            if (not self.incremental):
                self.msg = ('Failed to verify/create share: %s.' % self.sname)
                self.create_share(self.sname, self.dest_pool)

                self.msg = ('Failed to create the replica metadata object '
                            'for share: %s.' % self.sname)
                data = {
                    'share': self.sname,
                    'appliance': self.sender_ip,
                    'src_share': self.src_share,
                }
                self.rid = self.create_rshare(data)
            else:
                self.msg = ('Failed to retreive the replica metadata '
                            'object for share: %s.' % self.sname)
                rso = ReplicaShare.objects.get(share=self.sname)
                self.rid = rso.id
                # Find and send the current snapshot to the sender. This will
                # be used as the start by btrfs-send diff.
                self.msg = ('Failed to verify latest replication snapshot '
                            'on the system.')
                latest_snap = self._latest_snap(rso)

            self.msg = ('Failed to create receive trail for rid: %d' %
                        self.rid)
            data = {
                'snap_name': self.snap_name,
            }
            self.rtid = self.create_receive_trail(self.rid, data)

            # delete the share, move the oldest snap to share
            self.msg = ('Failed to promote the oldest Snapshot to Share.')
            oldest_snap = get_oldest_snap(self.snap_dir,
                                          self.num_retain_snaps,
                                          regex='_replication_')
            if (oldest_snap is not None):
                snap_path = ('%s/%s' % (self.snap_dir, oldest_snap))
                share_path = ('%s%s/%s' %
                              (settings.MNT_PT, self.dest_pool, self.sname))
                pool = Pool.objects.get(name=self.dest_pool)
                remove_share(pool, self.sname, '-1/-1')
                set_property(snap_path, 'ro', 'false', mount=False)
                run_command(['/usr/bin/rm', '-rf', share_path], throw=False)
                shutil.move(snap_path, share_path)
                self.delete_snapshot(self.sname, oldest_snap)

            self.msg = ('Failed to prune old Snapshots')
            self._delete_old_snaps(self.sname, self.snap_dir,
                                   self.num_retain_snaps + 1)

            self.msg = ('Failed to validate the source share(%s) on '
                        'sender(uuid: %s '
                        ') Did the ip of the sender change?' %
                        (self.src_share, self.sender_id))
            self.validate_src_share(self.sender_id, self.src_share)

            # Parent subvolume for the destination share.
            sub_vol = ('%s%s/%s' %
                       (settings.MNT_PT, self.dest_pool, self.sname))
            if (not is_subvol(sub_vol)):
                self.msg = ('Failed to create parent subvolume %s' % sub_vol)
                run_command([BTRFS, 'subvolume', 'create', sub_vol])

            self.msg = ('Failed to create snapshot directory: %s' %
                        self.snap_dir)
            run_command(['/usr/bin/mkdir', '-p', self.snap_dir])
            snap_fp = ('%s/%s' % (self.snap_dir, self.snap_name))

            # If the snapshot already exists, presumably from the previous
            # attempt and the sender tries to send the same, reply back with
            # snap_exists and do not start the btrfs-receive
            if (is_subvol(snap_fp)):
                logger.debug('Id: %s. Snapshot to be sent(%s) already '
                             'exists. Not starting a new receive process' %
                             (self.identity, snap_fp))
                self._send_recv('snap-exists')
                self._sys_exit(0)

            cmd = [BTRFS, 'receive', self.snap_dir]
            self.msg = ('Failed to start the low level btrfs receive '
                        'command(%s). Aborting.' % cmd)
            self.rp = subprocess.Popen(cmd,
                                       shell=False,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)

            self.msg = ('Failed to send receiver-ready')
            # latest_snap (or '') tells the sender where to diff from.
            rcommand, rmsg = self._send_recv('receiver-ready', latest_snap
                                             or '')
            if (rcommand is None):
                logger.error('Id: %s. No response from the broker for '
                             'receiver-ready command. Aborting.' %
                             self.identity)
                self._sys_exit(3)

            # Sender-side commands that abort this transfer.
            term_commands = (
                'btrfs-send-init-error',
                'btrfs-send-unexpected-termination-error',
                'btrfs-send-nonzero-termination-error',
            )
            num_tries = 10
            poll_interval = 6000  # 6 seconds
            num_msgs = 0
            t0 = time.time()
            while (True):
                socks = dict(self.poll.poll(poll_interval))
                if (socks.get(self.dealer) == zmq.POLLIN):
                    # reset to wait upto 60(poll_interval x num_tries
                    # milliseconds) for every message
                    num_tries = 10
                    command, message = self.dealer.recv_multipart()
                    if (command == 'btrfs-send-stream-finished'):
                        # this command concludes fsdata transfer. After this,
                        # btrfs-recev process should be
                        # terminated(.communicate).
                        if (self.rp.poll() is None):
                            self.msg = ('Failed to terminate btrfs-recv '
                                        'command')
                            out, err = self.rp.communicate()
                            out = out.split('\n')
                            err = err.split('\n')
                            logger.debug('Id: %s. Terminated btrfs-recv. '
                                         'cmd = %s out = %s err: %s rc: %s' %
                                         (self.identity, cmd, out, err,
                                          self.rp.returncode))
                        if (self.rp.returncode != 0):
                            self.msg = ('btrfs-recv exited with unexpected '
                                        'exitcode(%s). ' % self.rp.returncode)
                            raise Exception(self.msg)
                        self._send_recv('btrfs-recv-finished')
                        self.refresh_share_state()
                        self.refresh_snapshot_state()

                        self.msg = ('Failed to update receive trail for '
                                    'rtid: %d' % self.rtid)
                        self.update_receive_trail(self.rtid, {
                            'status': 'succeeded',
                        })
                        dsize, drate = self.size_report(
                            self.total_bytes_received, t0)
                        logger.debug('Id: %s. Receive complete. Total data '
                                     'transferred: %s. Rate: %s/sec.' %
                                     (self.identity, dsize, drate))
                        self._sys_exit(0)

                    if (command in term_commands):
                        self.msg = ('Terminal command(%s) received from the '
                                    'sender. Aborting.' % command)
                        raise Exception(self.msg)

                    # Normal data chunk: feed it to btrfs receive if alive.
                    if (self.rp.poll() is None):
                        self.rp.stdin.write(message)
                        self.rp.stdin.flush()
                        # @todo: implement advanced credit request system.
                        self.dealer.send_multipart([b'send-more', ''])
                        num_msgs += 1
                        self.total_bytes_received += len(message)
                        # Periodic progress report every 1000 chunks.
                        if (num_msgs == 1000):
                            num_msgs = 0
                            dsize, drate = self.size_report(
                                self.total_bytes_received, t0)
                            logger.debug('Id: %s. Receiver alive. Data '
                                         'transferred: %s. Rate: %s/sec.' %
                                         (self.identity, dsize, drate))
                    else:
                        out, err = self.rp.communicate()
                        out = out.split('\n')
                        err = err.split('\n')
                        logger.error('Id: %s. btrfs-recv died unexpectedly. '
                                     'cmd: %s out: %s. err: %s' %
                                     (self.identity, cmd, out, err))
                        msg = (
                            'Low level system error from btrfs receive '
                            'command. cmd: %s out: %s err: %s for rtid: %s' %
                            (cmd, out, err, self.rtid))
                        data = {
                            'status': 'failed',
                            'error': msg,
                        }
                        self.msg = ('Failed to update receive trail for '
                                    'rtid: %d.' % self.rtid)
                        self.update_receive_trail(self.rtid, data)
                        self.msg = msg
                        raise Exception(self.msg)
                else:
                    # Broker silent for this poll interval; give up after 10.
                    num_tries -= 1
                    msg = ('No response received from the broker. '
                           'remaining tries: %d' % num_tries)
                    logger.error('Id: %s. %s' % (self.identity, msg))
                    if (num_tries == 0):
                        self.msg = ('%s. Terminating the receiver.' % msg)
                        raise Exception(self.msg)
Пример #58
0
def main():
    """Bootstrap a Rockstor install: certs, databases, migrations, services.

    Intended to run as a system initialisation script. Accepts an optional
    '-x' argv flag to enable debug logging, then performs, in order:
    kernel housekeeping, self-signed SSL certificate generation (only when
    certs are missing), flash-media optimisation, timezone and sshd_config
    sync, one-time Postgresql database setup (guarded by the STAMP file),
    Django migrations, prepdb, firewalld shutdown, nginx config update and
    Rockstor service enablement.
    """
    loglevel = logging.INFO
    if (len(sys.argv) > 1 and sys.argv[1] == '-x'):
        loglevel = logging.DEBUG
    logging.basicConfig(format='%(asctime)s: %(message)s', level=loglevel)
    set_def_kernel(logging)
    try:
        delete_old_kernels(logging)
    except Exception as e:
        # Best-effort housekeeping: a failure here must not abort bootstrap.
        logging.debug('Exception while deleting old kernels. Soft error. '
                      'Moving on.')
        logging.exception(e)

    cert_loc = '%s/certs/' % BASE_DIR
    # A partial cert set (key without cert, or vice versa) is unusable:
    # wipe the directory so both files are regenerated together below.
    if (os.path.isdir(cert_loc)):
        if (not os.path.isfile('%s/rockstor.cert' % cert_loc)
                or not os.path.isfile('%s/rockstor.key' % cert_loc)):
            shutil.rmtree(cert_loc)

    if (not os.path.isdir(cert_loc)):
        os.mkdir(cert_loc)
        dn = ("/C=US/ST=Rockstor user's state/L=Rockstor user's "
              "city/O=Rockstor user/OU=Rockstor dept/CN=rockstor.user")
        logging.info('Creating openssl cert...')
        # Generate an unencrypted 2048-bit RSA key and a CSR with the
        # placeholder subject above.
        run_command([
            OPENSSL, 'req', '-nodes', '-newkey', 'rsa:2048', '-keyout',
            '%s/first.key' % cert_loc, '-out',
            '%s/rockstor.csr' % cert_loc, '-subj', dn
        ])
        logging.debug('openssl cert created')
        logging.info('Creating rockstor key...')
        run_command([
            OPENSSL, 'rsa', '-in',
            '%s/first.key' % cert_loc, '-out',
            '%s/rockstor.key' % cert_loc
        ])
        logging.debug('rockstor key created')
        # Typo fix: message previously read 'Singing cert...'.
        logging.info('Signing cert with rockstor key...')
        # Self-sign the CSR for 10 years (3650 days).
        run_command([
            OPENSSL, 'x509', '-in',
            '%s/rockstor.csr' % cert_loc, '-out',
            '%s/rockstor.cert' % cert_loc, '-req', '-signkey',
            '%s/rockstor.key' % cert_loc, '-days', '3650'
        ])
        logging.debug('cert signed.')
        logging.info('restarting nginx...')
        run_command([SUPERCTL, 'restart', 'nginx'])

    cleanup_rclocal(logging)
    logging.info('Checking for flash and Running flash optimizations if '
                 'appropriate.')
    run_command([FLASH_OPTIMIZE, '-x'], throw=False)
    try:
        logging.info('Updating the timezone from the system')
        update_tz(logging)
    except Exception as e:
        logging.error('Exception while updating timezone: %s' % e.__str__())
        logging.exception(e)

    try:
        logging.info('Updating sshd_config')
        bootstrap_sshd_config(logging)
    except Exception as e:
        logging.error('Exception while updating sshd_config: %s' % e.__str__())
        # Consistency fix: log the traceback as the sibling handlers do.
        logging.exception(e)

    # One-time database initialisation, guarded by the STAMP marker file.
    if (not os.path.isfile(STAMP)):
        logging.info('Please be patient. This script could take a few minutes')
        shutil.copyfile('%s/conf/django-hack' % BASE_DIR,
                        '%s/django' % BASE_BIN)
        run_command([SYSCTL, 'enable', 'postgresql'])
        # Typo fix: message previously read 'Progresql enabled'.
        logging.debug('Postgresql enabled')
        pg_data = '/var/lib/pgsql/data'
        # Start from a clean data directory so initdb cannot fail on
        # leftovers from a prior (possibly broken) install.
        if (os.path.isdir(pg_data)):
            shutil.rmtree('/var/lib/pgsql/data')
        logging.info('initializing Postgresql...')
        run_command(['/usr/bin/postgresql-setup', 'initdb'])
        logging.info('Done.')
        run_command([SYSCTL, 'restart', 'postgresql'])
        run_command([SYSCTL, 'status', 'postgresql'])
        logging.debug('Postgresql restarted')
        logging.info('Creating app databases...')
        run_command(['su', '-', 'postgres', '-c', '/usr/bin/createdb smartdb'])
        logging.debug('smartdb created')
        run_command(
            ['su', '-', 'postgres', '-c', '/usr/bin/createdb storageadmin'])
        logging.debug('storageadmin created')
        logging.info('Done')
        logging.info('Initializing app databases...')
        run_command([
            'su', '-', 'postgres', '-c',
            "psql -c \"CREATE ROLE rocky WITH SUPERUSER LOGIN PASSWORD 'rocky'\""
        ])  # noqa E501
        logging.debug('rocky ROLE created')
        run_command([
            'su', '-', 'postgres', '-c',
            "psql storageadmin -f %s/conf/storageadmin.sql.in" % BASE_DIR
        ])  # noqa E501
        logging.debug('storageadmin app database loaded')
        run_command([
            'su', '-', 'postgres', '-c',
            "psql smartdb -f %s/conf/smartdb.sql.in" % BASE_DIR
        ])
        logging.debug('smartdb app database loaded')
        logging.info('Done')
        run_command([
            'cp', '-f',
            '%s/conf/postgresql.conf' % BASE_DIR, '/var/lib/pgsql/data/'
        ])
        logging.debug('postgresql.conf copied')
        run_command([
            'cp', '-f',
            '%s/conf/pg_hba.conf' % BASE_DIR, '/var/lib/pgsql/data/'
        ])
        logging.debug('pg_hba.conf copied')
        run_command([SYSCTL, 'restart', 'postgresql'])
        logging.info('Postgresql restarted')
        run_command(['touch', STAMP])
        require_postgres(logging)
        logging.info('Done')

    logging.info('Running app database migrations...')
    migration_cmd = [
        DJANGO,
        'migrate',
        '--noinput',
    ]
    fake_migration_cmd = migration_cmd + ['--fake']
    smartdb_opts = ['--database=smart_manager', 'smart_manager']

    # Fake-apply each app's initial migration when it is not yet recorded,
    # so installs whose tables pre-date Django migrations don't error out
    # on "table already exists".
    for app in ('storageadmin', 'smart_manager'):
        db = 'default'
        if app == 'smart_manager':
            db = app
        o, e, rc = run_command(
            [DJANGO, 'migrate', '--list',
             '--database=%s' % db, app])
        initial_faked = False
        for l in o:
            # '[X]' marks an applied migration in --list output.
            if l.strip() == '[X] 0001_initial':
                initial_faked = True
                break
        if not initial_faked:
            db_arg = '--database=%s' % db
            run_command(fake_migration_cmd + [db_arg, app, '0001_initial'])

    run_command(migration_cmd + ['storageadmin'])
    run_command(migration_cmd + smartdb_opts)
    run_command(migration_cmd + ['auth'])
    run_command(migration_cmd + ['django_ztask'])
    logging.info('Done')
    logging.info('Running prepdb...')
    run_command([
        PREP_DB,
    ])
    logging.info('Done')

    # Rockstor manages its own firewall needs; firewalld conflicts.
    logging.info('stopping firewalld...')
    run_command([SYSCTL, 'stop', 'firewalld'])
    run_command([SYSCTL, 'disable', 'firewalld'])
    logging.info('firewalld stopped and disabled')
    update_nginx(logging)

    # Keep a pristine copy of /etc/issue before it is customised.
    shutil.copyfile('/etc/issue', '/etc/issue.rockstor')
    init_update_issue(logging)

    enable_rockstor_service(logging)
    enable_bootstrap_service(logging)
Пример #59
0
def create_repclone(share, request, logger, snapshot):
    """
    Replace an existing share, on disk and in the db, with a snapshot.

    Counterpart to create_clone for the case where the target share already
    exists: the snapshot subvol is promoted into the share's former position.
    Steps, in order:
    - unmount and btrfs-delete the target share (via remove_share);
    - clear the read-only flag on the source snapshot subvol;
    - remove the stale share mount point directory;
    - move the snapshot subvol into the share's old on-disk path;
    - copy the snapshot's qgroup/usage figures onto the share db entry;
    - drop the now-redundant snapshot db entry;
    - re-apply the share quota and remount the share.
    :param share: Share object to be supplanted.
    :param request: request object, used only for exception reporting.
    :param logger: Logger object to reference.
    :param snapshot: source snapshot (or quirk share) object promoted in.
    :return: Response wrapping the serialized, updated share.
    """
    try:
        logger.debug('Supplanting share ({}) with '
                     'snapshot ({}).'.format(share.name, snapshot.name))
        # The very first received subvol is recorded as a share whose name is
        # a snapshot-directory path (current quirk of the import system), e.g.
        # ".snapshots/C583C37F-...1712B_sharename/sharename_19_replication_1",
        # while subsequent snapshots are plain "sharename_19_replication_N"
        # (N counting replication-task generations). Taking the last path
        # component normalises both forms. Object.name is used here for
        # polymorphism across snapshot/quirk-share objects.
        snap_basename = snapshot.name.split('/')[-1]
        # Source: /mnt2/poolname/.snapshots/sharename/snapname
        source_subvol = '{}{}/.snapshots/{}/{}'.format(settings.MNT_PT,
                                                       share.pool.name,
                                                       share.name,
                                                       snap_basename)
        # Target: /mnt2/poolname/sharename
        target_subvol = ('{}{}/{}'.format(settings.MNT_PT, share.pool.name,
                                          share.name))
        # Unmount and btrfs-delete the existing share subvol.
        remove_share(share.pool, share.name, '-1/-1')
        # Snapshots are read-only; the promoted subvol must be writable.
        set_property(source_subvol, 'ro', 'false', mount=False)
        # Best-effort cleanup of the leftover mount point directory.
        run_command(['/usr/bin/rm', '-rf', target_subvol], throw=False)
        # Shares and snapshots are both subvols, so a rename effectively
        # promotes the snapshot into the share's former on-disk identity.
        shutil.move(source_subvol, target_subvol)
        # Mirror the new on-disk state in the db: the share record adopts
        # the snapshot's qgroup and usage figures ...
        share.qgroup = snapshot.qgroup
        share.rusage = snapshot.rusage
        share.eusage = snapshot.eusage
        share.save()
        # ... which makes the snapshot's own db entry redundant.
        snapshot.delete()
        # Re-establish the share's quota on its parent qgroup.
        update_quota(share.pool, share.pqgroup, share.size * 1024)
        # Remount the promoted subvol at the share's usual mount point.
        mnt_pt = '{}{}'.format(settings.MNT_PT, share.name)
        mount_share(share, mnt_pt)
        return Response(ShareSerializer(share).data)
    except Exception as e:
        handle_exception(e, request)
Пример #60
0
def mount_root(pool):
    """
    Mounts a given pool at the default mount root (usually /mnt2/) using the
    pool.name as the final path entry. Ie pool.name = test-pool will be mounted
    at /mnt2/test-pool. Any mount options held in pool.mnt_options will be added
    to the mount command via the -o option as will a compress=pool.compression
    entry.
    N.B. Initially the mount target is defined by /dev/disk/by-label/pool.name,
    if this fails then an attempt to mount by each member of
    /dev/disk/by-id/pool.disk_set.all() but only if there are any members.
    If this second method also fails then an exception is raised, currently all
    but the last failed mount by device name is logged. If no disk members were
    reported by pool.disk_set.count() a separate Exception is raised.
    :param pool: pool object
    :return: either the relevant mount point or an Exception which either
    indicates 'no disks in pool' or 'Unknown Reason'
    """
    root_pool_mnt = DEFAULT_MNT_DIR + pool.name
    if (is_share_mounted(pool.name)):
        return root_pool_mnt
    # Creates a directory to act as the mount point.
    create_tmp_dir(root_pool_mnt)
    mnt_device = '/dev/disk/by-label/%s' % pool.name
    mnt_cmd = [
        MOUNT,
        mnt_device,
        root_pool_mnt,
    ]
    mnt_options = ''
    if (pool.mnt_options is not None):
        mnt_options = pool.mnt_options
    if (pool.compression is not None):
        if (re.search('compress', mnt_options) is None):
            mnt_options = ('%s,compress=%s' % (mnt_options, pool.compression))
    if (os.path.exists(mnt_device)):
        if (len(mnt_options) > 0):
            mnt_cmd.extend(['-o', mnt_options])
        run_command(mnt_cmd)
        return root_pool_mnt
    # If we cannot mount by-label, let's try mounting by device; one by one
    # until we get our first success.
    if (pool.disk_set.count() < 1):
        raise Exception('Cannot mount Pool(%s) as it has no disks in it.' %
                        pool.name)
    last_device = pool.disk_set.last()
    for device in pool.disk_set.all():
        mnt_device = ('/dev/disk/by-id/%s' % device.name)
        if (os.path.exists(mnt_device)):
            mnt_cmd = [
                MOUNT,
                mnt_device,
                root_pool_mnt,
            ]
            if (len(mnt_options) > 0):
                mnt_cmd.extend(['-o', mnt_options])
            try:
                run_command(mnt_cmd)
                return root_pool_mnt
            except Exception, e:
                if (device.name == last_device.name):
                    # exhausted mounting using all devices in the pool
                    raise e
                logger.error(
                    'Error mounting: %s. Will try using another device.' %
                    mnt_cmd)
                logger.exception(e)