Exemple #1
0
def cleanup_and_restore_files(process):
    """Archive the current storage tree, wipe it, and restore from backup.

    Side effects only: writes a timestamped tar.gz snapshot under
    /opt/anaconda, removes the git/pgdata/object directories, runs the
    file restore, and recreates the postgres backup dir owned by uid 999.
    """
    stamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H%M%S')

    # Snapshot everything under storage/ except the conda repository blobs.
    with sh.pushd('/opt/anaconda'):
        sh.tar(
            "--exclude=storage/object/anaconda-repository",
            "-czvf",
            f"git_pgdata.snapshot_{stamp}.tar.gz",
            "storage"
        )

    # These trees are about to be restored, so drop the current copies.
    for stale_dir in (
        '/opt/anaconda/storage/git',
        '/opt/anaconda/storage/pgdata',
        '/opt/anaconda/storage/object/anaconda-objects',
        '/opt/anaconda/storage/object/anaconda-projects',
    ):
        sh.rm('-Rf', stale_dir)

    # Pull the files back from the backup location.
    file_backup_restore(process, 'restore')

    # Postgres needs its directory back, owned by uid 999 (container user).
    pg_dir = f'{process.postgres_system_backup}'
    sh.mkdir(pg_dir)
    sh.chown('999:root', pg_dir)
    sh.chmod('700', pg_dir)

    return
Exemple #2
0
def install(installDir, domain, db_server, db_name, db_user, db_password):
    """Run the PHP CLI installer, then remove the installer and fix perms."""
    log( "Installing from index_cli.php ... " )
    cli_script = installDir + 'install/index_cli.php'

    # Non-interactive install: DB credentials plus fixed admin defaults.
    cli_args = [
        "--domain={}".format(domain),
        "--db_server={}".format(db_server),
        "--db_name={}".format(db_name),
        "--db_user={}".format(db_user),
        "--db_password={}".format(db_password),
        "--db_create=1",
        "--ssl=0",
        "[email protected]",
        "--password=admin",
        "--language=fr",
        "--country=fr",
    ]
    result = php(cli_script, *cli_args)
    print( result )

    # The installer must not stay on disk once the site is configured.
    log( "Removing install dir ... " )
    rm("-rf", installDir + 'install')

    log( "Removing var/cache/prod ...")
    rm("-rf", installDir + 'var/cache/prod')

    # App files belong to the app owner; var/ must be writable by the server.
    chown("-R", APP_OWNER, installDir)
    chmod("-R", "777", installDir + 'var/')
Exemple #3
0
    def create_master_prefix(self, master_prefix_path: str) -> None:
        """Create (or reuse) the btrfs master-prefix subvolume and boot wine in it.

        Creates the subvolume via sudo when missing, chowns it to the current
        user, then initializes a git repository, runs wineboot, and commits
        the initial state.

        NOTE(review): relies on a module-level ``prefix_dir`` as the parent
        directory for the subvolume -- confirm it matches
        ``os.path.dirname(master_prefix_path)``.
        """
        # create master-prefix subvolume
        if not os.path.exists(master_prefix_path):
            print(' > Create master-prefix subvolume')
            master_prefix_name = os.path.basename(master_prefix_path)
            with sh.contrib.sudo:
                sh.btrfs.subvolume.create(master_prefix_name, _cwd=prefix_dir)
            # fail fast if the subvolume did not appear where expected
            assert os.path.exists(master_prefix_path), \
                f'"{master_prefix_path}" does not exist'
        else:
            print(' > Use existing master-prefix subvolume')

        print(' > Set owner to current user')
        # subvolume creation ran as root; hand it to the invoking user
        with sh.contrib.sudo:
            sh.chown(f'{getpass.getuser()}:users', master_prefix_path)

        # boot wine in master-prefix subvolume
        print(' > Initializing git repository')
        self.prefix_handler._git('init', cwd=master_prefix_path)

        print(' > Booting wine')
        self.prefix_handler._wine('wineboot', cwd=master_prefix_path)

        print(' > Committing changes')
        self.prefix_handler._commit('Initial commit', cwd=master_prefix_path)
Exemple #4
0
    def __init__(self, source, dest, rsync_args='', user=None, **_):
        """Prepare (but do not start) an rsync transfer from source to dest.

        Args:
            source: rsync source path/spec.
            dest: destination path; its parent directory is created if needed.
            rsync_args: extra whitespace-separated rsync flags.
            user: if given, rsync runs via ``sudo -u user`` and the created
                destination directory is chowned to that user.
            **_: ignored; lets callers pass a wider config dict.
        """
        super().__init__()
        self.user = user
        self.args = (rsync_args, source, dest)

        # Ensure the destination's parent exists and is a directory; a plain
        # file at that path is fatal.
        dest_dir = os.path.split(dest)[0]
        if not os.path.isdir(dest_dir):
            if os.path.exists(dest_dir):
                logger.critical('Destination %s isn\'t valid because a file exists at %s', dest, dest_dir)
                sys.exit(1)
            sh.mkdir('-p', dest_dir)
            if user is not None:
                sh.chown('{}:'.format(user), dest_dir)

        # Run rsync directly or through sudo, depending on `user`.
        if user is not None:
            self.rsync = sh.sudo.bake('-u', user, 'rsync')
        else:
            self.rsync = sh.rsync
        # progress=True becomes --progress (sh keyword-to-flag conversion)
        self.rsync = self.rsync.bake(source, dest, '--no-h', *rsync_args.split(), progress=True)
        # daemon/running suggest this is a Thread subclass -- the transfer
        # presumably runs in run(); confirm against the base class
        self.daemon = True
        self.running = threading.Event()
        self._buffer = ''
        self._status = {
            'running': False,
            'source': source,
            'dest': dest,
        }
        self._status_lock = threading.Lock()
Exemple #5
0
    def initGit(self, file_content):
        """Clone the user's git repo, commit an initial index.html, and
        install a post-update hook that auto-deploys on push.
        """
        from sh import git,chmod,chown
        git_repourl = 'git@' + Config.GIT_SVR + ':' + self.user_gitrepo
        os.chdir(Config.SDP_USER_DATA_HOME)
        git('clone', git_repourl)
        #git of add ci push
        os.chdir(self.userhome)
        with open('index.html', 'w') as f:
            f.write(file_content)
        git('add', 'index.html')
        git('commit', '-m', 'init commit')
        git('push', 'origin', 'master')
        #git of hooks, for update code
        # Bash hook body: pull on update; on failure, wipe and re-clone.
        # The %s slots are (deploy path, deploy user, repo url) in order.
        post_update_content = """#!/bin/bash
#Automatically update the project code, if there is an automatic update error, will re deploy the code.
unset $(git rev-parse --local-env-vars)
DeployPath=%s
echo -e "\033[33mDeploy user is => %s\033[0m"
[ ! -d $DeployPath ] && echo -e "\033[31mDirectory $DeployPath does not exist!\033[0m" && exit 1
cd $DeployPath
git pull
if test $? -ne 0;then
    echo -e "\033[31mAutomatic pull fail, try to re deploy!\033[0m"
    cd ~
    rm -rf ${DeployPath}/*
    rm -rf ${DeployPath}/.git
    git clone %s $DeployPath
    [ $? -ne 0 ] && echo -e "\033[31mRedeploy fail, quit!\033[0m" && exit 1
fi
echo -e "\033[32mAutomatic deployment complete.\033[0m"
exit 0""" %(self.userhome, self.name, git_repourl)
        with open(os.path.join(self.user_gitrepo, 'hooks/post-update'), 'w') as f:
            f.write(post_update_content)
        # the hook must be executable for git to run it on push
        chmod('a+x', os.path.join(self.user_gitrepo, 'hooks/post-update'))
        chown('-R', Config.GIT_USER, self.userhome)
Exemple #6
0
def update_file(fname, who):
    """Write an odbc.ini-style file describing the ClickHouse DSN.

    The file is chowned to `who` (group `staff`) and made world-readable.
    """
    config = configparser.ConfigParser()

    # Register the DSN under "ODBC Data Sources" so the driver is found.
    config['ODBC Data Sources'] = {CH_DSN: CH_DRIVER}

    # Connection parameters for the DSN section itself.
    config[CH_DSN] = {
        'driver':
        f'/usr/local/opt/clickhouse-odbc/lib/libclickhouseodbc{CH_VARIANT}.dylib',
        'description': 'Connection to criminals ClickHouse DB',
        'url':
        f'http://{CH_HOST}:{CH_PORT}/?database={CH_DB}&user={CH_USER}&password={CH_PASSWORD}',
        'server': CH_HOST,
        'password': CH_PASSWORD,
        'port': CH_PORT,
        'database': CH_DB,
        'uid': CH_USER,
        'sslmode': 'no'
    }

    with open(fname, 'w') as configfile:
        config.write(configfile)

    sh.chown(f"{who}:staff", fname)
    sh.chmod("644", fname)
Exemple #7
0
    def initSvn(self, file_content):
        """Set up a new SVN user's working copy: install a post-commit hook
        that auto-updates the user's home, check out the repo, and commit an
        initial index.html.
        """
        from sh import svn, chmod, chown
        repourl = Config.SVN_ADDR + self.name
        hook_content = r'''#!/bin/bash
export LC_CTYPE=en_US.UTF-8
export LANG=en_US.UTF-8
svn up %s
''' % self.userhome
        #auto update with hook
        os.chdir(os.path.join(self.user_repo, 'hooks'))
        with open('post-commit', 'w') as f:
            f.write(hook_content)
        # NOTE(review): 777 is an int; sh stringifies it to "777", which
        # chmod reads as octal 777 -- the whole repo becomes world-writable.
        chmod('-R', 777, self.user_repo)
        #checkout add ci for init
        svn('co', '--non-interactive', '--trust-server-cert', repourl,
            self.userhome)
        # the web server serves the working copy, so it must own it
        chown('-R', Config.HTTPD_USER + ':' + Config.HTTPD_GROUP,
              self.userhome)
        os.chdir(self.userhome)
        with open('index.html', 'w') as f:
            f.write(file_content)
        svn('add', 'index.html')
        svn('ci', '--username', self.name, '--password', self.passwd,
            '--non-interactive', '--trust-server-cert', '-m', 'init commit',
            '--force-log')
Exemple #8
0
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Drain both work queues, then restore result ownership if set."""
        log.debug("Shutting down Local backend")
        # Block until every queued item has been processed and acknowledged.
        for pending in (self.req_q, self.resp_q):
            pending.join()

        # Hand the results back to the configured owner, if any.
        if self.uid_gid is not None:
            sh.chown('-R', self.uid_gid, self.local_store)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Drain request/response queues, then chown results if configured."""
        log.debug("Shutting down Local backend")
        # wait for queues to empty
        self.req_q.join()
        self.resp_q.join()

        # change the results owner
        # uid_gid is presumably a "uid:gid" chown spec; None disables this
        if self.uid_gid is not None:
            sh.chown('-R', self.uid_gid, self.local_store)
Exemple #10
0
def sync_files(process):
    """Give the backup tree to the sync user and rsync it to the sync node."""
    # The rsync user must own everything it is about to transfer.
    owner_spec = f'{process.sync_user}:{process.sync_user}'
    sh.chown('-R', owner_spec, f'{process.backup_directory}')

    # Mirror the backup directory to the same path on the remote node,
    # running as the sync user so its SSH identity is used.
    rsync_command = (
        f'rsync -avrq {process.backup_directory}/ '
        f'{process.sync_user}@{process.sync_node}:{process.backup_directory}')
    process.run_su_command(process.sync_user, rsync_command)
Exemple #11
0
	def MountCrypt(self, filepath):
		"""Mount an already-mapped encrypted container at its mount path.

		Creates the mount directory (owned by the container's user) when
		missing; refuses to proceed if the mount path is a regular file.
		"""
		container = self.containers[filepath]
		self.logger.info('Mounting encrypted container [{0}] to [{1}]'.format(container.filepath, container.mount_path))
		if os.path.exists(container.mount_path) and os.path.isfile(container.mount_path):
			raise RuntimeError('The mount path [{0}] already exists and is a file'.format(container.mount_path))

		if not os.path.exists(container.mount_path):
			self.logger.info('Creating directory [{0}] for mounting encrypted container [{1}]'.format(container.mount_path, container.filepath))
			os.makedirs(container.mount_path)
			sh.chown(container.user_owner, container.mount_path)

		# the decrypted (mapped) device holds a btrfs filesystem
		sh.mount('-t', 'btrfs', container.mapped_device, container.mount_path)
Exemple #12
0
def restore_postgres_database(process):
    """Move the SQL dump into the DB volume and replay it in the container."""
    process.get_postgres_docker_container()

    # The container only sees files placed under the DB system backup path.
    backup_src = f'{process.backup_directory}/{process.postgres_backup_name}'
    sh.mv(backup_src, process.postgres_system_backup_path)

    # Match the ownership the containerized postgres expects on the host.
    sh.chown('polkitd:input', process.postgres_system_backup_path)

    # Feed the dump to psql as the postgres user inside the container.
    restore_command = ("su - postgres -c 'psql -U postgres < "
                       f"{process.postgres_container_backup_path}'")
    process.run_command_on_container(process.docker_cont_id, restore_command)
Exemple #13
0
def store_pubkey(username, home_dir, pubkey):
    """
    Create the user's directory and copy the public key into it.
    Also, set the proper permissions.
    """
    dir_ssh = os.path.join(home_dir, '.ssh')
    if not os.path.exists(dir_ssh):
        # sshd requires ~/.ssh to be private to the owner
        os.makedirs(dir_ssh, mode=0o700)
    auth_key_path = os.path.join(dir_ssh, 'authorized_keys')
    pubkey = '{0}\n'.format(pubkey.strip())
    # Bug fix: the file was opened in binary mode ('wb') while writing a
    # str, which raises TypeError on Python 3. Open in text mode instead.
    with open(auth_key_path, 'w') as fd:
        fd.write(pubkey)
    os.chmod(auth_key_path, 0o600)  # key file must be owner-only
    sh.chown('-R', '{username}:{username}'.format(username=username), home_dir)
Exemple #14
0
def store_pubkey(username, home_dir, pubkey):
    """
    Create the user's directory and copy the public key into it.
    Also, set the proper permissions.
    """
    dir_ssh = os.path.join(home_dir, '.ssh')
    if not os.path.exists(dir_ssh):
        # sshd requires ~/.ssh to be private to the owner
        os.makedirs(dir_ssh, mode=0o700)
    auth_key_path = os.path.join(dir_ssh, 'authorized_keys')
    pubkey = '{0}\n'.format(pubkey.strip())
    # Bug fix: the file was opened in binary mode ('wb') while writing a
    # str, which raises TypeError on Python 3. Open in text mode instead.
    with open(auth_key_path, 'w') as fd:
        fd.write(pubkey)
    os.chmod(auth_key_path, 0o600)  # key file must be owner-only
    sh.chown('-R', '{username}:{username}'.format(username=username), home_dir)
Exemple #15
0
def setupWP(domain,username,password):
	"""Provision a WordPress vhost: dedicated user, docroot, logs, perms."""
	siteRoot = '/webapps/%s'%(domain)
	siteLogs = '/webapps/%s/logs'%(domain)
	sitePublic = '/webapps/%s/public'%(domain)
	wpConfTemplate = 'wp.nginx.vhost.conf.template'
	# shell user whose home directory is the vhost root
	sh.useradd('-m','-d',siteRoot, username,'-s', '/bin/bash','-p', password)
	sh.usermod('-aG', username, WEB_SERVER_GROUP)
	sh.mkdir('-p', siteLogs)
	sh.mkdir('-p', sitePublic)
	sh.cp('index.php', sitePublic)
	# owner+group tree (750), group-writable logs (770), owned by the user
	sh.chmod('-R','750', siteRoot)
	sh.chmod('-R','770', siteLogs)
	sh.chown('-R',"%s:%s"%(username,username), siteRoot)
	setupNginx(domain,username,wpConfTemplate,sitePublic,siteLogs)
	setupPhpFpm(username)
Exemple #16
0
    def CreateApacheSvn(self, connect=Config.SVN_TYPE):
        """Create the user's SVN repository and expose it through Apache.

        Appends a <Location> block (http or https flavor) to the Apache
        config, adds/updates the user's htpasswd entry, and restarts Apache.

        Raises:
            TypeError: if `connect` is neither 'http' nor 'https'.
        """
        from sh import svnadmin, chown, htpasswd, apachectl
        if not os.path.exists(Config.SVN_ROOT):
            os.mkdir(Config.SVN_ROOT)
        svnadmin('create', self.user_repo)
        # Apache (mod_dav_svn) must own the repository to serve it
        chown('-R', Config.HTTPD_USER + ':' + Config.HTTPD_GROUP,
              self.user_repo)
        http_user_repo_content = r'''<Location /sdp/%s>
    DAV svn
    SVNPath %s
    AuthType Basic
    AuthName "Welcome to Sdp CodeRoot!"
    AuthUserFile %s
    <LimitExcept GET PROPFIND OPTIONS REPORT>
        Require valid-user
    </LimitExcept>
</Location>
''' % (self.name, self.user_repo, Config.SVN_PASSFILE)

        https_user_repo_content = r'''<Location /sdp/%s>
    DAV svn
    SVNPath %s
    AuthType Basic
    AuthName "Welcome to Sdp CodeRoot!"
    AuthUserFile %s
    SSLRequireSSL
    <LimitExcept GET PROPFIND OPTIONS REPORT>
        Require valid-user
    </LimitExcept>
</Location>
''' % (self.name, self.user_repo, Config.SVN_PASSFILE)

        if connect == 'http':
            user_repo_content = http_user_repo_content
        elif connect == 'https':
            user_repo_content = https_user_repo_content
        else:
            raise TypeError('Only support http or https.')

        with open(Config.HTTPD_CONF, 'a+') as f:
            f.write(user_repo_content)

        # -cb creates the password file on first use; -mb updates in place
        if os.path.exists(Config.SVN_PASSFILE):
            htpasswd('-mb', Config.SVN_PASSFILE, self.name, self.passwd)
        else:
            htpasswd('-cb', Config.SVN_PASSFILE, self.name, self.passwd)

        apachectl('restart')  #sh.Command(script)
Exemple #17
0
def restore_repo_db(process):
    """Move the repository dump into the DB volume and pg_restore it."""
    process.get_postgres_docker_container()

    backup_dest = (
        f'{process.postgres_system_backup}/{process.repository_db_name}')

    # The container can only read files placed inside the DB directory.
    sh.mv(f'{process.backup_directory}/{process.repository_db_name}',
          backup_dest)

    # Ownership the containerized postgres expects on the host side.
    sh.chown('polkitd:input', backup_dest)

    # Replay the dump into anaconda_repository inside the container.
    restore_command = (
        "su - postgres -c 'pg_restore -U postgres --clean -d "
        f"anaconda_repository {process.postgres_container_backup}/"
        f"{process.repository_db_name}'")
    process.run_command_on_container(process.docker_cont_id, restore_command)
Exemple #18
0
def setupWP(domain, username, password):
    """Provision a WordPress vhost: system user, docroot, logs, permissions."""
    site_root = '/webapps/%s' % (domain)
    site_logs = site_root + '/logs'
    site_public = site_root + '/public'
    conf_template = 'wp.nginx.vhost.conf.template'

    # Dedicated shell user whose home is the vhost root.
    sh.useradd('-m', '-d', site_root, username, '-s', '/bin/bash', '-p',
               password)
    sh.usermod('-aG', username, WEB_SERVER_GROUP)

    for directory in (site_logs, site_public):
        sh.mkdir('-p', directory)
    sh.cp('index.php', site_public)

    # Owner+group tree, group-writable logs, everything owned by the user.
    sh.chmod('-R', '750', site_root)
    sh.chmod('-R', '770', site_logs)
    sh.chown('-R', "%s:%s" % (username, username), site_root)

    setupNginx(domain, username, conf_template, site_public, site_logs)
    setupPhpFpm(username)
Exemple #19
0
def reset_permissions(username, folder):
    """
    Reset UNIX file permissions on a Plone installation folder.

    We set files readable only by the owner.
    """
    # Bug fix: chown was called below without being imported; also use the
    # print() function so this runs on Python 3.
    from sh import chmod, chown

    print("Re(setting) file permissions on %s" % folder)
    # Disable read access for other UNIX users
    chown("-R", "%s:%s" % (username, username), folder)
    chmod("-R", "o-rwx", folder)

    # In the case someone has run the buildout as root and
    # left badly owned, non-writable files around
    chmod("-R", "u+rwx", folder)
Exemple #20
0
    def CreateApacheSvn(self, connect=Config.SVN_TYPE):
        """Create the user's SVN repository and publish it via Apache.

        Writes an http or https <Location> block to the Apache config,
        maintains the htpasswd entry, and restarts Apache.

        Raises:
            TypeError: if `connect` is neither 'http' nor 'https'.
        """
        from sh import svnadmin, chown, htpasswd, apachectl
        if not os.path.exists(Config.SVN_ROOT):
            os.mkdir(Config.SVN_ROOT)
        svnadmin('create', self.user_repo)
        # the web server process must own the repo to serve it via DAV
        chown('-R', Config.HTTPD_USER + ':' + Config.HTTPD_GROUP, self.user_repo)
        http_user_repo_content = r'''<Location /sdp/%s>
    DAV svn
    SVNPath %s
    AuthType Basic
    AuthName "Welcome to Sdp CodeRoot!"
    AuthUserFile %s
    <LimitExcept GET PROPFIND OPTIONS REPORT>
        Require valid-user
    </LimitExcept>
</Location>
''' % (self.name, self.user_repo, Config.SVN_PASSFILE)

        https_user_repo_content = r'''<Location /sdp/%s>
    DAV svn
    SVNPath %s
    AuthType Basic
    AuthName "Welcome to Sdp CodeRoot!"
    AuthUserFile %s
    SSLRequireSSL
    <LimitExcept GET PROPFIND OPTIONS REPORT>
        Require valid-user
    </LimitExcept>
</Location>
''' % (self.name, self.user_repo, Config.SVN_PASSFILE)

        if connect == 'http':
            user_repo_content = http_user_repo_content
        elif connect == 'https':
            user_repo_content = https_user_repo_content
        else:
            raise TypeError('Only support http or https.')

        with open(Config.HTTPD_CONF, 'a+') as f:
            f.write(user_repo_content)

        # -cb creates the htpasswd file on first use; -mb updates in place
        if os.path.exists(Config.SVN_PASSFILE):
            htpasswd('-mb', Config.SVN_PASSFILE, self.name, self.passwd)
        else:
            htpasswd('-cb', Config.SVN_PASSFILE, self.name, self.passwd)

        apachectl('restart')  #sh.Command(script)
Exemple #21
0
 def restore_attachments(self, zipfile, docker=False):
     """Unpack a filestore backup zip into this database's filestore dir.

     Args:
         zipfile: path to the backup archive containing filestore/*.
         docker: when True, chown the data dir to 999:999 afterwards
             (presumably the service uid/gid inside the container --
             confirm against the deployment).
     """
     unzip = sh.unzip.bake('-x', '-qq', '-n')
     restore_folder = os.path.join(self.data_dir,
                                   'filestore',
                                   self.target_db)
     sh.mkdir('-p', restore_folder)
     # unzip will place files are in <datadir>/filestore/<dbname>/filestore,
     # we create a symlink to <datadir>/filestore/<dbname> so they wind up
     # in the right spot
     restore_folder_faulty = os.path.join(restore_folder, 'filestore')
     sh.ln('-s', restore_folder, restore_folder_faulty)
     unzip(zipfile, 'filestore/*', '-d', restore_folder)
     # cleanup the symlink
     sh.rm(restore_folder_faulty)
     # When running in docker mode, change permissions
     if docker:
         sh.chown('-R', '999:999', self.data_dir)
Exemple #22
0
	def _create_docroot(self):
		"""Create the vhost directory tree and hand it to the site user.

		Raises if the vhost directory already exists; optionally chains
		into the PHP setup.
		"""
		if os.path.exists(self.vhost_dir):
			raise Exception("Directory %s already exist!" % self.vhost_dir)

		# Python 3 fix: print is a function and octal literals need the
		# 0o prefix (0755 and `print x` are syntax errors on Python 3).
		print("Creating vhost %s in %s" % (self.fqdn, self.vhost_dir))

		os.makedirs(self.vhost_dir, 0o755)
		for x in ['logs', 'htdocs', 'temp/uploads', 'temp/sessions']:
			os.makedirs(os.path.join(self.vhost_dir, x), 0o755)

		# sh stringifies 550 into the mode argument for the chmod binary
		chmod(550, os.path.join(self.vhost_dir, 'temp/'))

		owner = "%s:%s" % (self.user, self.group)
		chown(owner, self.vhost_dir, '-R')

		if self.enable_php:
			self._install_php()
Exemple #23
0
    def _create_test_filesystem(self, atime='off', compress='on'):
        """(Re)create the test filesystem with the given properties.

        Destroys any existing filesystem first, then sets atime/compress
        and chowns the mountpoint to nobody:nogroup.

        Returns:
            The filesystem object from _get_test_filesystem().
        """
        fs = self._get_test_filesystem()
        if fs.exists():
            logging.info("Destroying existing test filesystem '%s'", fs)
            fs.destroy(confirm=True)

        logging.info("Creating test filesystem '%s'", fs)
        fs.create()

        logging.info("Setting atime='%s' compress='%s'", atime, compress)
        fs.properties['atime'] = atime
        fs.properties['compress'] = compress

        logging.info("Changing ownership")
        # mountpoint property may not be a plain str; coerce for chown
        sh.chown('nobody:nogroup', str(fs.properties['mountpoint']))

        return fs
Exemple #24
0
    def _create_docroot(self):
        """Create the vhost directory tree and hand it to the site user.

        Raises if the vhost directory already exists; optionally chains
        into the PHP setup.
        """
        if os.path.exists(self.vhost_dir):
            raise Exception("Directory %s already exist!" % self.vhost_dir)

        # Python 3 fix: print is a function and octal literals need the
        # 0o prefix (0755 and `print x` are syntax errors on Python 3).
        print("Creating vhost %s in %s" % (self.fqdn, self.vhost_dir))

        os.makedirs(self.vhost_dir, 0o755)
        for x in ['logs', 'htdocs', 'temp/uploads', 'temp/sessions']:
            os.makedirs(os.path.join(self.vhost_dir, x), 0o755)

        # sh stringifies 550 into the mode argument for the chmod binary
        chmod(550, os.path.join(self.vhost_dir, 'temp/'))

        owner = "%s:%s" % (self.user, self.group)
        chown(owner, self.vhost_dir, '-R')

        if self.enable_php:
            self._install_php()
def main(domain='goodcrypto.private.server.website'):
    '''
        Generate postgresql certficate.

        >>> main(domain='test.domain.com')
        New certificate(s) generated
    '''

    # generate a key for postgres and be sure the ownship is correct
    key_name = 'server.key'
    cert_name = 'server.crt'
    dirname = '/var/local/projects/goodcrypto/server/data/db/postgresql'
    if os.path.exists(dirname) and not os.path.islink(dirname):
        generate_certificate(domain, dirname,
                             private_key_name=key_name,
                             public_cert_name=cert_name)
        # postgres must be able to read both halves of the key pair
        sh.chown('postgres:postgres', os.path.join(dirname, cert_name))
        move_private_key(dirname, key_name)
        sh.chown('postgres:postgres', os.path.join(dirname, key_name))

    return 'New certificate(s) generated'
    def initServiceToUser(self, request, user):
        """Create a local UNIX account and home directory for `user`.

        On shell-command failure the user object is deleted and False is
        returned; on success a Service row is recorded and True is returned.
        """
        try:
            username = user.username
            homedirectory = '/home/%s' % (username)
            useradd(username, '-m')
            chown('-R', username, homedirectory)

        except ErrorReturnCode:
            # useradd/chown exited non-zero -- roll back the new user
            messages.error(request,
                           "LocalAccountError: User creation failed. Check if user has sudo rights, that runs this application.")
            user.delete()
            return False
        local = LocalAccount()
        tmp = Service.objects.create(user=user, servicetype=local.getServiceType(), state='a')
        tmp.save()
        messages.success(request, "User is successfully created.")
        return True
Exemple #27
0
    def create_prefix_from_master(
        self, master_prefix: str, prefix: str
    ) -> None:
        """Snapshot the master prefix into a new subvolume for `prefix`.

        NOTE(review): uses a module-level ``prefix_dir`` as the btrfs
        working directory; confirm both prefixes live directly under it.
        """
        master_prefix_name = get_prefix_name_from_path(master_prefix)
        prefix_name = get_prefix_name_from_path(prefix)

        print(' > Creating prefix snapshot')
        with sh.contrib.sudo:
            sh.btrfs.subvolume.snapshot(
                master_prefix_name, prefix_name, _cwd=prefix_dir)

        # fail fast if the snapshot did not land where expected
        assert os.path.exists(prefix), \
            f'"{prefix}" does not exist'

        print(' > Set owner to current user')
        # the snapshot was created by root; hand it to the invoking user
        with sh.contrib.sudo:
            sh.chown(f'{getpass.getuser()}:users', prefix)
Exemple #28
0
def main(domain='goodcrypto.private.server.website'):
    '''
        Generate postgresql certficate.

        >>> main(domain='test.domain.com')
        New certificate(s) generated
    '''

    # generate a key for postgres and be sure the ownship is correct
    pg_dir = '/var/local/projects/goodcrypto/server/data/db/postgresql'
    if os.path.exists(pg_dir) and not os.path.islink(pg_dir):
        generate_certificate(
            domain, pg_dir,
            private_key_name='server.key', public_cert_name='server.crt')
        # postgres must be able to read both halves of the key pair
        sh.chown('postgres:postgres', os.path.join(pg_dir, 'server.crt'))
        move_private_key(pg_dir, 'server.key')
        sh.chown('postgres:postgres', os.path.join(pg_dir, 'server.key'))

    return 'New certificate(s) generated'
Exemple #29
0
    def initSvn(self, file_content):
        """Initialize the SVN working copy for a new user: install an
        auto-update post-commit hook, check out the repo, and commit an
        initial index.html.
        """
        from sh import svn, chmod, chown
        repourl = Config.SVN_ADDR + self.name
        hook_content = r'''#!/bin/bash
export LC_CTYPE=en_US.UTF-8
export LANG=en_US.UTF-8
svn up %s
''' % self.userhome
        #auto update with hook
        os.chdir(os.path.join(self.user_repo, 'hooks'))
        with open('post-commit', 'w') as f: f.write(hook_content)
        # NOTE(review): 777 is an int; sh stringifies it, and chmod reads
        # "777" as octal -- the repository becomes world-writable.
        chmod('-R', 777, self.user_repo)
        #checkout add ci for init
        svn('co', '--non-interactive', '--trust-server-cert', repourl, self.userhome)
        # the web server serves the working copy, so it must own it
        chown('-R', Config.HTTPD_USER + ':' + Config.HTTPD_GROUP, self.userhome)
        os.chdir(self.userhome)
        with open('index.html', 'w') as f:
            f.write(file_content)
        svn('add', 'index.html')
        svn('ci', '--username', self.name, '--password', self.passwd, '--non-interactive', '--trust-server-cert', '-m', 'init commit', '--force-log')
def tljh_config_post_install(config):
    """
    Configure /srv/scratch and change configs/mods
    """
    # Shared scratch space: world-writable, setgid so new files inherit
    # the group, plus a skeleton symlink so new homes link to it.
    sh.mkdir('/srv/scratch', '-p')

    # jupyterhub-users doesn't get created until a user logs in
    # make sure it's created before changing permissions on directory
    ensure_group('jupyterhub-users')

    sh.chown('root:jupyterhub-users', '/srv/scratch')
    for mode_change in ('777', 'g+s'):
        sh.chmod(mode_change, '/srv/scratch')
    sh.ln('-s', '/srv/scratch', '/etc/skel/scratch')

    
    
    
Exemple #31
0
    def _install_php(self):
        """Set up per-vhost PHP FCGI config under the vhost directory."""
        # Python 3 fix: octal literals need the 0o prefix (0755 is a
        # syntax error on Python 3).
        for x in ['conf', 'fcgi']:
            os.makedirs(os.path.join(self.vhost_dir, x), 0o755)

        global_updates = os.path.join(self.globals_dir, 'vhost.ini')
        vhost_updates = os.path.join(self.vhost_dir, 'conf/vhost.ini')
        vhost_ini = os.path.join(self.vhost_dir, 'conf/php.ini')

        # Merge global php.ini overrides into this vhost's php.ini, if any.
        if os.path.isfile(global_updates):
            shutil.copy(global_updates, vhost_updates)
            #merge(None, vhost_updates, vhost_ini, vhost_dir=self.vhost_dir)
            mergeini("-v", self.vhost_dir, "-o", vhost_ini, global_updates,
                     vhost_updates)

        # Render the FCGI config and starter script from the templates.
        d = {'vhost_dir': self.vhost_dir}
        with open(os.path.join(self.vhost_dir, 'fcgi/fcgi.conf'), 'w') as f:
            f.write(fcgi_conf_template % d)

        starter_file = os.path.join(self.vhost_dir, 'fcgi/fcgi-starter')
        with open(starter_file, 'w') as f:
            f.write(fcgi_starter_template % d)

        # conf/ and fcgi/ are root-owned; only the fcgi dir itself and the
        # starter are handed back to the site user below.
        for x in ['conf', 'fcgi']:
            chown("root:root", os.path.join(self.vhost_dir, x), '-R')

        chmod(555, starter_file)
        owner = "%s:%s" % (self.user, self.group)
        chown(owner, os.path.join(self.vhost_dir, 'fcgi'))
        chown(owner, starter_file)
        # immutable bit: the starter script must not be editable in place
        chattr('+i', starter_file)
Exemple #32
0
	def _install_php(self):
		"""Set up per-vhost PHP FCGI config under the vhost directory."""
		# Python 3 fix: octal literals need the 0o prefix (0755 is a
		# syntax error on Python 3).
		for x in ['conf', 'fcgi']:
			os.makedirs(os.path.join(self.vhost_dir, x), 0o755)

		global_updates = os.path.join(self.globals_dir, 'vhost.ini')
		vhost_updates = os.path.join(self.vhost_dir, 'conf/vhost.ini')
		vhost_ini = os.path.join(self.vhost_dir, 'conf/php.ini')

		# Merge global php.ini overrides into this vhost's php.ini, if any.
		if os.path.isfile(global_updates):
			shutil.copy(global_updates, vhost_updates)
			#merge(None, vhost_updates, vhost_ini, vhost_dir=self.vhost_dir)
			mergeini("-v", self.vhost_dir, "-o", vhost_ini, global_updates, vhost_updates)

		# Render the FCGI config and starter script from the templates.
		d = {
			'vhost_dir': self.vhost_dir
		}
		with open(os.path.join(self.vhost_dir, 'fcgi/fcgi.conf'), 'w') as f:
			f.write(fcgi_conf_template % d)

		starter_file = os.path.join(self.vhost_dir, 'fcgi/fcgi-starter')
		with open(starter_file, 'w') as f:
			f.write(fcgi_starter_template % d)

		# conf/ and fcgi/ are root-owned; only the fcgi dir itself and the
		# starter are handed back to the site user below.
		for x in ['conf', 'fcgi']:
			chown("root:root", os.path.join(self.vhost_dir, x), '-R')

		chmod(555, starter_file)
		owner = "%s:%s" % (self.user, self.group)
		chown(owner, os.path.join(self.vhost_dir, 'fcgi'))
		chown(owner, starter_file)
		# immutable bit: the starter script must not be editable in place
		chattr('+i', starter_file)
Exemple #33
0
def main():
    """Prepare data directories and ownership for the goodcrypto services."""

    def _claim(path, mode_changes):
        # chown to the service account, then apply each mode change in order
        sh.chown('goodcrypto:goodcrypto', path)
        for mode in mode_changes:
            sh.chmod(mode, path)

    data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..', 'data'))
    log('data dir: {}'.format(data_dir))

    # gpg insists on these permissions
    oce_gpg_dir = '{}/oce/.gnupg'.format(data_dir)
    if os.path.exists(oce_gpg_dir):
        sh.chmod('--recursive', 'go-rwx', oce_gpg_dir)
        log('prepared oce')

    if USE_POSTGRESQL:
        # postgres must own its config dir
        sh.chown('-h', '--recursive', 'postgres:postgres', '/etc/postgresql')

    # we need a dir for other persistent data that is not normally in data_dir)
    persistent_dir = '{}/persistent'.format(data_dir)
    if not os.path.exists(persistent_dir):
        sh.mkdir('--parents', persistent_dir)
    _claim(persistent_dir, ('g+rx', 'o-rwx'))
    log('prepared {}'.format(persistent_dir))

    # root should own these subdirectories
    django_dir = '{}/django'.format(persistent_dir)
    if not os.path.exists(django_dir):
        sh.mkdir(django_dir)
    _claim(django_dir, ('go-rwx',))

    return 0
Exemple #34
0
def chown(owner, path, recursive=False):
    ''' Change owner of path.

        'owner' can be the user name, uid as an int, or uid as a string.
        It may also be a "user:group" pair; in that case the group is
        verified as well.

        Log and reraise any exception.
    '''

    # import delayed to avoid infinite recursion
    import syr.user

    try:
        if recursive:
            sh.chown('--recursive', owner, path)
        else:
            sh.chown(owner, path)
        #log.debug('chown(owner={}, path=={})'.format(owner, path))
    except sh.ErrorReturnCode as sh_exception:
        log.error('unable to chown: user={}, owner={}, path={}'.
            format(syr.user.whoami(), owner, path))
        log.error(sh_exception)
        raise

    # verify. after we have higher confidence, move this into doctests
    # split a "user:group" spec so user and group can be checked separately
    if type(owner) is str and ':' in owner:
        owner, group = owner.split(':')
    else:
        group = None
    # owner may already be a numeric uid (int or str); otherwise resolve it
    try:
        uid = int(owner)
    except ValueError:
        uid = syr.user.getuid(owner)
    assert getuid(path) == uid, 'uid set to {} but is {}'.format(uid, getuid(path))
    if group is not None:
        try:
            gid = int(group)
        except ValueError:
            gid = syr.user.getgid(group)
        assert getgid(path) == gid, 'gid set to {} but is {}'.format(gid, getgid(path))
def make_or_wipe_server_side_subversion_repo(svn_parent_root, repo_name, compression, deltification, rep_sharing):
    """Recreate a server-side Subversion repository and tune its fsfs.conf.

    Deletes any existing repo at svn_parent_root/repo_name, creates a fresh
    one owned by www-data, then rewrites db/fsfs.conf in place: the three
    boolean flags each enable uncommenting/overriding one tuning knob.
    """
    if not str(svn_parent_root).endswith("/"):
        svn_parent_root += "/"

    repo_path = svn_parent_root + repo_name

    # Wipe the Subversion repo
    sh.rm("-rf", repo_path)
    if not os.path.exists(svn_parent_root):
        os.makedirs(svn_parent_root)
    sh.svnadmin("create", repo_path)
    sh.chown("-R", "www-data:www-data", repo_path)
    sh.chmod("-R", "755", repo_path)
    sh.sync()

    # fileinput inplace mode redirects stdout into the file, and each `line`
    # already ends with a newline. Bug fix: the fall-through branch used
    # print(line), which appended a second newline and doubled every
    # untouched line; end='' preserves the file verbatim.
    for line in fileinput.FileInput(repo_path + "/db/fsfs.conf", inplace=True):
        if compression and "# compression-level" in line:
            print("compression-level = 0")
        elif deltification and "# max-deltification-walk" in line:
            print("max-deltification-walk = 0")
        elif rep_sharing and "# enable-rep-sharing" in line:
            print("enable-rep-sharing = false")
        else:
            print(line, end='')
Exemple #36
0
def generate_certificate(
  domain, dirname, private_key_name=PRIVATE_KEY, public_cert_name=PUBLIC_CERT, name=None, days=365):
    '''
        Generate a self-signed SSL certficate.

        Writes the public cert to the file dirname/public_cert_name.
        Creates a dir dirname/private. Writes the private key to
        dirname/private/private_key_name.

        >>> generate_certificate('test.domain.com', '/tmp')
    '''

    if name is None:
        name = domain

    log('starting to generate certificate for {}'.format(name))

    if not os.path.exists(dirname):
        os.mkdir(dirname)
        log('created {}'.format(dirname))

    private_dirname = os.path.join(dirname, 'private')
    if not os.path.exists(private_dirname):
        os.mkdir(private_dirname)
        log('created {}'.format(private_dirname))

    # Best effort: prefer the ssl-cert group, fall back to root:root, and
    # continue either way -- the chmod below is what actually protects it.
    # Bug fix: the bare `except:` clauses also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    try:
        sh.chown('root:ssl-cert', private_dirname)
    except Exception:
        try:
            sh.chown('root:root', private_dirname)
        except Exception:
            pass
    sh.chmod('go-rwx', private_dirname)

    delete_old_cert(domain, dirname, private_key_name, public_cert_name)
    gen_private_key(domain, dirname, private_key_name)
    gen_csr(domain, dirname, name, private_key_name)
    gen_cert(domain, dirname, private_key_name, public_cert_name, days)
    log('created certificate for {}'.format(domain))
Exemple #37
0
def handle_bootimg(filename):
    """Unpack an Android boot/recovery-style image and extract its ramdisk.

    Images whose basename looks like a boot/recovery image are exploded
    with IMGTOOL into ./extracted, the ramdisk is decompressed (LZ4 or
    gzip), every extracted file is processed with at_extract, and the
    tree is optionally preserved under MY_FULL_DIR/SUB_DIR.  Anything
    else is delegated to handle_binary().
    """
    global KEEPSTUFF
    name = getBasename(filename)
    if (name[:4] in ['boot', 'hosd', 'BOOT']
            or name[:8] in ['recovery', 'fastboot', 'RECOVERY']
            or name[:9] == 'droidboot' or name[:10] == 'okrecovery'
            or name[-4:] == '.bin'):
        subprocess.run([IMGTOOL, filename, 'extract'])
        os.chdir('extracted')
        format_ = getFormat('ramdisk')
        if (format_ == 'LZ4'):
            # Bug fix: with shell=True a list's extra items become
            # arguments to /bin/sh, not to the command.  Run argv
            # directly without a shell.
            subprocess.run(['unlz4', 'ramdisk', 'ramdisk.out'])
            # Bug fix: a pipeline only works as a single shell string;
            # in an argv list the '|' was passed as a literal argument.
            subprocess.run('cat ramdisk.out | cpio -i', shell=True)
            os.remove('ramdisk.out')
        elif (format_ == 'gzip'):
            cpio(gunzip('ramdisk', '-c'), '-i')
        rm('ramdisk')
        os.chdir('..')
        # Bug fix: -print0 separates entries with NUL bytes, which
        # splitlines() does not split on; split on '\0' and drop the
        # trailing empty entry instead.
        find_output = [p for p in find('extracted', '-print0')
                       .stdout.decode('utf-8').split('\0') if p]
        for line in find_output:
            if (os.path.isfile(line)):
                # Bug fix: inspect each found file, not the (already
                # deleted) 'ramdisk'.
                format_ = getFormat(line)
                if (format_ == 'gzip'):
                    # Bug fix: rename to "<file>.gz" so gunzip accepts
                    # it; previously '.gz' was a separate argument and
                    # mv treated it as a target directory.
                    mv(line, line + '.gz')
                    gunzip('-f', line + '.gz')
                    result = at_extract(line)
                else:
                    result = at_extract(line)
                print(line + " processed: " + result)
        if (KEEPSTUFF == 1):
            cp('-r', 'extracted', MY_FULL_DIR + '/' + SUB_DIR + '/' + name)
            chown('-R', EXTUSER + ':' + EXTGROUP,
                  MY_FULL_DIR + '/' + SUB_DIR + '/' + name)
        shutil.rmtree("extracted")
    else:
        handle_binary(filename)
Exemple #38
0
def copy_src(installDir, release):
    """Deploy a fresh PrestaShop source tree into *installDir*.

    Fetches the release archive into the cache, unpacks it (once),
    removes any previous installation, copies the shop sources in
    place, renames the admin directory and fixes ownership and
    permissions on var/.
    """
    _pull_release(release)

    archive_path = CACHE_DIR + release_filename(release)
    unpack_dir = CACHE_DIR + release_extract_dir(release)

    # Unpack the downloaded archive only once; later runs reuse the cache.
    if not os.path.isdir(unpack_dir):
        log("Extracting {} ".format(archive_path))
        unzip("-n", "-q", archive_path, "-d", unpack_dir)

    log("Removing old files ... ")
    rm("-rf", installDir)

    log("Creating install dir {} ... ".format(installDir))
    mkdir("-p", installDir)

    log("Copying files ... ")
    unzip("-n", "-q", unpack_dir + '/prestashop.zip', "-d", installDir)

    # The stock 'admin' directory is renamed to the configured name.
    log("Renaming admin as {}".format(ADMIN_DIR))
    mv(installDir + 'admin', installDir + ADMIN_DIR)

    chown("-R", APP_OWNER, installDir)
    chmod("-R", "777", installDir + 'var/')
Exemple #39
0
    def initGit(self, file_content):
        """Clone the user's git repository, push an initial index.html with
        *file_content*, and install a post-update hook that auto-deploys
        the user's home directory on every push.

        NOTE(review): relies on os.chdir side effects; call order matters.
        """
        from sh import git, chmod, chown
        git_repourl = 'git@' + Config.GIT_SVR + ':' + self.user_gitrepo
        # Clone into the shared user-data directory.
        os.chdir(Config.SDP_USER_DATA_HOME)
        git('clone', git_repourl)
        # Initial commit: add index.html, commit and push to master.
        os.chdir(self.userhome)
        with open('index.html', 'w') as f:
            f.write(file_content)
        git('add', 'index.html')
        git('commit', '-m', 'init commit')
        git('push', 'origin', 'master')
        # Server-side post-update hook: pull the latest code into the
        # deploy path, or re-clone from scratch if the pull fails.
        post_update_content = """#!/bin/bash
#Automatically update the project code, if there is an automatic update error, will re deploy the code.
unset $(git rev-parse --local-env-vars)
DeployPath=%s
echo -e "\033[33mDeploy user is => %s\033[0m"
[ ! -d $DeployPath ] && echo -e "\033[31mDirectory $DeployPath does not exist!\033[0m" && exit 1
cd $DeployPath
git pull
if test $? -ne 0;then
    echo -e "\033[31mAutomatic pull fail, try to re deploy!\033[0m"
    cd ~
    rm -rf ${DeployPath}/*
    rm -rf ${DeployPath}/.git
    git clone %s $DeployPath
    [ $? -ne 0 ] && echo -e "\033[31mRedeploy fail, quit!\033[0m" && exit 1
fi
echo -e "\033[32mAutomatic deployment complete.\033[0m"
exit 0""" % (self.userhome, self.name, git_repourl)
        with open(os.path.join(self.user_gitrepo, 'hooks/post-update'),
                  'w') as f:
            f.write(post_update_content)
        # The hook must be executable for git to run it.
        chmod('a+x', os.path.join(self.user_gitrepo, 'hooks/post-update'))
        chown('-R', Config.GIT_USER, self.userhome)
Exemple #40
0
def LicheePiImage(workdir, boot_files, kernel_files, rootfs_files):
    """Assemble a bootable SD-card image (sdcard.img) for a LicheePi board.

    Creates a 300 MB image, partitions it (16 MB FAT boot partition +
    ext4 rootfs), writes the bootstrap blob, copies boot/kernel files
    onto p1 and the root filesystem onto p2.

    NOTE(review): requires root privileges (losetup/mount/chown) and
    leaves /tmp/p1, /tmp/p2 mounted — no unmount/detach in this block.
    """
    mkdir('-p', workdir)
    IMAGE_NAME = 'sdcard.img'
    IMAGE_PATH = str(Path(workdir).joinpath(IMAGE_NAME))

    # Blank 300 MB image file.
    dd('if=/dev/zero', 'of={}'.format(IMAGE_PATH), 'bs=1M', 'count=300')

    # Attach the image to the first free loop device.
    loop_dev = str(losetup('-f')).split()[0]
    losetup(loop_dev, IMAGE_PATH)
    # Partition table: 16 MB FAT (type c) at 1M, rest Linux (type L).
    sfdisk(cat(_in='1M,16M,c\n,,L'), loop_dev)
    partx('-u', loop_dev)
    mkfsvfat('{}p1'.format(loop_dev))
    mkfsext4('{}p2'.format(loop_dev))
    # Clear the area between the MBR and the first partition, then
    # write the bootstrap (SPL) at the 8 KB offset.
    dd('if=/dev/zero', 'of={}'.format(loop_dev), 'bs=1K', 'seek=1',
       'count=1023')
    dd('if={}'.format(boot_files['bootstrap']), 'of={}'.format(loop_dev),
       'bs=1K', 'seek=8')
    sync()
    # Mount both partitions and populate them.
    mkdir('-p', '/tmp/p1')
    mkdir('-p', '/tmp/p2')
    mount('{}p1'.format(loop_dev), '/tmp/p1')
    mount('{}p2'.format(loop_dev), '/tmp/p2')
    cp(boot_files['bin'], '/tmp/p1/')
    cp(kernel_files['bin'], '/tmp/p1/')
    cp(kernel_files['dtb'], '/tmp/p1/')
    # Compile the U-Boot boot script onto the boot partition.
    mkimage('-C', 'none', '-A', 'arm', '-T', 'script', '-d',
            './resources/boot.cmd', '/tmp/p1/boot.scr')

    # Root filesystem plus the skeleton directories init expects.
    rsync('-r', '--links', rootfs_files['rootdir'] + '/', '/tmp/p2/')
    mkdir('-p', '/tmp/p2/etc/init.d')
    mkdir('-p', '/tmp/p2/proc')
    mkdir('-p', '/tmp/p2/dev')
    mkdir('-p', '/tmp/p2/sys')
    mkdir('-p', '/tmp/p2/var')
    touch('/tmp/p2/etc/init.d/rcS')
    chown('-R', 'root:root', '/tmp/p2/')
def handle_bootimg(filename):
    """Unpack an Android boot/recovery-style image and keep the result.

    Variant without the find/at_extract pass: explodes the image with
    IMGTOOL, decompresses the ramdisk (LZ4 or gzip), copies the
    extracted tree to MY_FULL_DIR/SUB_DIR/<name>, then cleans up.
    """
    name = getBasename(filename)
    if (name[:4] in ['boot', 'hosd', 'BOOT']
            or name[:8] in ['recovery', 'fastboot', 'RECOVERY']
            or name[:9] == 'droidboot' or name[:10] == 'okrecovery'
            or name[-4:] == '.bin'):
        subprocess.run([IMGTOOL, filename, 'extract'])
        os.chdir('extracted')
        format_ = getFormat('ramdisk')
        if (format_ == 'LZ4'):
            # Bug fix: with shell=True a list's extra items become
            # arguments to /bin/sh, not to the command.  Run argv
            # directly without a shell.
            subprocess.run(['unlz4', 'ramdisk', 'ramdisk.out'])
            # Bug fix: a pipeline only works as a single shell string;
            # in an argv list the '|' was passed as a literal argument.
            subprocess.run('cat ramdisk.out | cpio -i', shell=True)
            os.remove('ramdisk.out')
        elif (format_ == 'gzip'):
            cpio(gunzip('ramdisk', '-c'), '-i')
        shutil.rmtree('ramdisk')
        os.chdir('..')
        cp('-r', 'extracted', MY_FULL_DIR + '/' + SUB_DIR + '/' + name)
        chown('-R', EXTUSER + ':' + EXTGROUP,
              MY_FULL_DIR + '/' + SUB_DIR + '/' + name)
        shutil.rmtree("extracted")
    else:
        print('This image file is not known and should be handled as a binary')
Exemple #42
0
def make_installer(cfg):
    """Build a .deb installer for the application described by *cfg*.

    cfg must provide at least 'name' and 'version'; this function adds
    'build_dir', 'install_dir' and 'desktop_dir' to it as a side effect.
    Lays out a Debian package tree under build/, writes DEBIAN/control,
    copies the frozen application and desktop file in, then runs
    dpkg-deb --build.
    """
    print("Building installer...")
    build_dir = 'build/{name}-{version}'.format(**cfg)
    cfg.update({'build_dir': build_dir})
    install_dir = '{build_dir}/usr/share/{name}'.format(**cfg)
    desktop_dir = '{build_dir}/usr/share/applications/'.format(**cfg)
    cfg.update({'install_dir': install_dir, 'desktop_dir': desktop_dir})

    os.makedirs(build_dir)

    with cd(build_dir):
        os.makedirs('DEBIAN')

        #: Write control
        with open('DEBIAN/control', 'w') as f:
            f.write(CONTROL_TEMPLATE.format(**cfg))

    #: Copy the frozen application into the package tree
    os.makedirs(install_dir)
    print(sh.cp('-R', glob('build/exe.linux-x86_64-3.5/*'), install_dir))

    #: Make a simlink to /usr/local/bin
    #print(sh.ln('-sf', '{install_dir}/{name}'.format(**cfg),
    #            '{install_dir}/usr/local/bin/{name}'.format(**cfg)))

    #: Make a desktop icon /usr/share/applications
    os.makedirs(desktop_dir)
    print(sh.cp('{name}/res/declaracad.desktop'.format(**cfg), desktop_dir))

    #: Prepare ownership; best effort — narrowed from a bare `except:`
    #: so real problems (KeyboardInterrupt, SystemExit) are not hidden,
    #: and the failure is at least reported.
    try:
        print(sh.chown('-R', 'root:root', build_dir))
    except Exception as e:
        print('chown failed (not running as root?): {}'.format(e))

    #: Build it
    deb = sh.Command('dpkg-deb')
    print(deb('--build', build_dir))
Exemple #43
0
def StartWeb(**user):
    """Provision a complete web-hosting account from the *user* keyword dict.

    Expects keys: name, passwd, time (months), service, email, network,
    enable_svn, enable_git.  Sets up FTP (and optionally SVN *or* git),
    writes a welcome page, creates and starts a Docker container, records
    the account in Redis and mails the credentials to the user.

    NOTE(review): Python 2 code (print statements).  Exits the process on
    SVN+git conflict or Redis connection failure.
    """
    if not isinstance(user, (dict)):
        raise TypeError("StartAll need a dict(user).")

    # Unpack account fields; time is kept as a string here and converted
    # with int() where needed below.
    name, passwd, time, service, email = user["name"], user["passwd"], str(user["time"]), user["service"], user["email"]
    dn = name + Config.DN_BASE
    PORT, image = Public.Handler(service)
    userhome = os.path.join(Config.SDP_USER_DATA_HOME, name)

    # docker network mode is 'bridge' or 'host'(not allow none and ContainerID)
    if user["network"] != None:
        docker_network_mode = user["network"]
    else:
        if Config.DOCKER_NETWORK not in ["bridge", "host"]:
            raise TypeError("Unsupport docker network mode")
        else:
            docker_network_mode = Config.DOCKER_NETWORK

    # define other code type, default is only ftp, disable svn and git, but SVN and git can't exist at the same time!
    if user["enable_svn"] != None:
        enable_svn = True
    else:
        enable_svn = False

    svn_repo = Config.SVN_ADDR + name

    """
    Git是什么?
    Git是目前世界上最先进的分布式版本控制系统(没有之一)。
    Git有什么特点?简单来说就是:高端大气上档次!
    The Git feature that really makes it stand apart from nearly every other SCM out there is its branching model.
    Git allows and encourages you to have multiple local branches that can be entirely independent of each other. The creation, merging, and deletion of those lines of development takes seconds.
    """
    if user["enable_git"] != None:
        enable_git = True
        git_repo = "git@" + Config.GIT_SVR + ":" + os.path.join(Config.GIT_ROOT, name) + ".git"
    else:
        enable_git = False

    # make repo info, make a choice
    if enable_svn == False:
        if enable_git == False:
            repos = "None"
        else:
            repos = "git=> " + git_repo
    if enable_svn == True:
        if enable_git == True:
            # repos = "svn=> " + svn_repo, "git=> "+git_repo
            # SVN and git are mutually exclusive: abort the provisioning.
            print "\033[0;31;40mSorry...You have to make a choice between SVN and git, and you don't have to choose all of the options.\033[0m"
            exit()
        else:
            repos = "svn=> " + svn_repo

    # Plain-text credentials mail sent to the user (Chinese template).
    userinfo_user = r"""
Dear %s, 以下是您的SdpCloud服务使用信息!
账号: %s
密码: %s
使用期: %d个月
服务类型: %s
验证邮箱: %s
用户域名: %s
版本库信息: %s

更多使用方法请查询官方文档(www.saintic.com),祝您使用愉快。 如果有任何疑惑,欢迎与我们联系:
邮箱: [email protected]
官网: http://www.saintic.com/
问题: https://github.com/saintic/Sdp/issues""" % (
        name,
        name,
        passwd,
        int(time),
        service,
        email,
        dn,
        str(repos),
    )

    # HTML welcome page dropped into the new account's web root.
    userinfo_welcome = r"""<!DOCTYPE html>
<html>
<head>
<title>User information for SdpCloud!</title>
</head>
<body>
<h1><center>Welcome %s:</center></h1>
<p>账号: %s</p>
<p>密码: %s</p>
<p>使用期: %d个月</p>
<p>服务类型: %s</p>
<p>验证邮箱: %s</p>
<p>用户域名: %s</p>
<p>版本库信息: %s</p>

<p>这是一个欢迎页面,请尽快使用FTP、SVN或Git覆盖此页面!</p>
<p><em>Thank you for using SdpCloud.</em></p>
</body>
</html>""" % (
        name,
        name,
        passwd,
        int(time),
        service,
        email,
        dn,
        str(repos),
    )

    # Account record stored in Redis for the admin side.
    userinfo_admin = {
        "name": name,
        "passwd": passwd,
        "time": int(time),
        "service": service,
        "email": email,
        "image": image,
        "ip": "127.0.0.1",
        "port": int(PORT),
        "dn": dn,
        "userhome": userhome,
        "repo": str(repos.split()[-1]),
        "expiretime": Public.Time(m=time),
        "network": docker_network_mode,
    }

    # define instances for writing redis and sending email.
    rc = Redis.RedisObject()
    ec = Mail.SendMail()
    if rc.ping():
        import Source
        from sh import svn, chmod, chown

        Code = Source.CodeManager(**userinfo_admin)
        Code.ftp()

        # FTP-only accounts get the welcome page written directly;
        # SVN/git accounts get it via their repository instead.
        if enable_svn == False and enable_git == False:
            os.mkdir(userhome)
            with open(os.path.join(userhome, "index.html"), "w") as f:
                f.write(userinfo_welcome)
            chown("-R", Config.FTP_VFTPUSER + ":" + Config.FTP_VFTPUSER, userhome)
            chmod("-R", "a+t", userhome)

        if enable_svn == True:
            Code.CreateApacheSvn()
            Code.initSvn(userinfo_welcome)

        if enable_git == True:
            Code.Git()
            Code.initGit(userinfo_welcome)

        # Reverse proxy + container creation, then persist and notify.
        Code.Proxy()
        Dk = Docker.Docker(
            **{
                "image": image,
                "name": name,
                "port": Config.PORTNAT["web"],
                "bind": ("127.0.0.1", PORT),
                "volume": userhome,
            }
        )
        cid = Dk.Create(mode=docker_network_mode)
        Dk.Start(cid)
        userinfo_admin["container"] = cid
        rc.hashset(**userinfo_admin)
        ec.send(name, email, userinfo_user)
    else:
        # raise an error for RedisConnectError(Error.py)
        print "\033[0;31;40mConnect Redis Server Error,Quit.\033[0m"
        sys.exit(7)
Exemple #44
0
def main():
    """Provision a Zulip development environment (trusty, amd64 only).

    Installs apt dependencies, the tsearch data package, PhantomJS,
    builds a Python virtualenv, installs npm packages and initializes
    the dev/test databases.  Python 2 code (execfile).
    """
    log = logging.getLogger("zulip-provisioner")
    # TODO: support other architectures
    if platform.architecture()[0] == '64bit':
        arch = 'amd64'
    else:
        log.critical("Only amd64 is supported.")
        # Bug fix: without stopping here, `arch` is never bound and the
        # .deb URL format below raises NameError.  Exit like the other
        # provisioner variant does.
        raise SystemExit(1)

    vendor, version, codename = platform.dist()

    if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
        log.critical("Unsupported platform: {} {}".format(vendor, codename))

    with sh.sudo:
        sh.apt_get.update(**LOUD)

        sh.apt_get.install(*APT_DEPENDENCIES["trusty"], assume_yes=True, **LOUD)

    temp_deb_path = sh.mktemp("package_XXXXXX.deb", tmpdir=True)

    # Fetch and install the tsearch dictionary package for this arch.
    sh.wget(
        "{}/{}_{}_{}.deb".format(
            TSEARCH_URL_BASE,
            TSEARCH_PACKAGE_NAME["trusty"],
            TSEARCH_VERSION,
            arch,
        ),
        output_document=temp_deb_path,
        **LOUD
    )

    with sh.sudo:
        sh.dpkg("--install", temp_deb_path, **LOUD)

    # Install PhantomJS under /srv and symlink it into the PATH.
    with sh.sudo:
        PHANTOMJS_PATH = "/srv/phantomjs"
        PHANTOMJS_TARBALL = os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64.tar.bz2")
        sh.mkdir("-p", PHANTOMJS_PATH, **LOUD)
        sh.wget("https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-1.9.8-linux-x86_64.tar.bz2",
                output_document=PHANTOMJS_TARBALL, **LOUD)
        sh.tar("xj", directory=PHANTOMJS_PATH, file=PHANTOMJS_TARBALL, **LOUD)
        sh.ln("-sf", os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64", "bin", "phantomjs"),
              "/usr/local/bin/phantomjs", **LOUD)

    # Recreate the virtualenv directory owned by the current user.
    with sh.sudo:
        sh.rm("-rf", VENV_PATH, **LOUD)
        sh.mkdir("-p", VENV_PATH, **LOUD)
        sh.chown("{}:{}".format(os.getuid(), os.getgid()), VENV_PATH, **LOUD)

    sh.virtualenv(VENV_PATH, **LOUD)

    # Add the ./tools and ./scripts/setup directories inside the repository root to
    # the system path; we'll reference them later.
    orig_path = os.environ["PATH"]
    os.environ["PATH"] = os.pathsep.join((
            os.path.join(ZULIP_PATH, "tools"),
            os.path.join(ZULIP_PATH, "scripts", "setup"),
            orig_path
    ))


    # Put Python virtualenv activation in our .bash_profile.
    with open(os.path.expanduser('~/.bash_profile'), 'w+') as bash_profile:
        bash_profile.writelines([
            "source .bashrc\n",
            "source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),),
        ])

    # Switch current Python context to the virtualenv.
    activate_this = os.path.join(VENV_PATH, "bin", "activate_this.py")
    execfile(activate_this, dict(__file__=activate_this))

    sh.pip.install(requirement=os.path.join(ZULIP_PATH, "requirements.txt"), **LOUD)

    with sh.sudo:
        sh.cp(REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH, **LOUD)

    # Add additional node packages for test-js-with-node.
    with sh.sudo:
        sh.npm.install(*NPM_DEPENDENCIES["trusty"], g=True, prefix="/usr", **LOUD)

    # Management commands expect to be run from the root of the project.
    os.chdir(ZULIP_PATH)

    os.system("tools/download-zxcvbn")
    os.system("tools/emoji_dump/build_emoji")
    os.system("generate_secrets.py -d")
    sh.configure_rabbitmq(**LOUD)
    sh.postgres_init_db(**LOUD)
    sh.do_destroy_rebuild_database(**LOUD)
    sh.postgres_init_test_db(**LOUD)
    sh.do_destroy_rebuild_test_database(**LOUD)
Exemple #45
0
def main():
    """Provision a Zulip development environment (trusty, x86/x86_64).

    Installs apt dependencies, the tsearch data package, PhantomJS,
    builds a Python virtualenv, installs npm packages and initializes
    the dev/test databases.  Supports --travis and --docker modes that
    restart supporting services.  Python 2 code (execfile).

    Returns 0 on success.
    """
    log = logging.getLogger("zulip-provisioner")

    # Pick package/binary architectures; bail out on anything non-x86.
    if platform.architecture()[0] == '64bit':
        arch = 'amd64'
        phantomjs_arch = 'x86_64'
    elif platform.architecture()[0] == '32bit':
        arch = "i386"
        phantomjs_arch = 'i686'
    else:
        log.critical("Only x86 is supported; ping [email protected] if you want another architecture.")
        sys.exit(1)

    vendor, version, codename = platform.dist()

    # NOTE(review): only logs on unsupported platforms; execution continues.
    if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
        log.critical("Unsupported platform: {} {}".format(vendor, codename))

    with sh.sudo:
        sh.apt_get.update(**LOUD)

        sh.apt_get.install(*APT_DEPENDENCIES["trusty"], assume_yes=True, **LOUD)

    temp_deb_path = sh.mktemp("package_XXXXXX.deb", tmpdir=True)

    # Fetch and install the tsearch dictionary package for this arch.
    sh.wget(
        "{}/{}_{}_{}.deb".format(
            TSEARCH_URL_BASE,
            TSEARCH_PACKAGE_NAME["trusty"],
            TSEARCH_VERSION,
            arch,
        ),
        output_document=temp_deb_path,
        **LOUD
    )

    with sh.sudo:
        sh.dpkg("--install", temp_deb_path, **LOUD)

    # Install PhantomJS under /srv (cached tarball) and symlink it into PATH.
    with sh.sudo:
        PHANTOMJS_PATH = "/srv/phantomjs"
        PHANTOMJS_BASENAME = "phantomjs-1.9.8-linux-%s" % (phantomjs_arch,)
        PHANTOMJS_TARBALL_BASENAME = PHANTOMJS_BASENAME + ".tar.bz2"
        PHANTOMJS_TARBALL = os.path.join(PHANTOMJS_PATH, PHANTOMJS_TARBALL_BASENAME)
        PHANTOMJS_URL = "https://bitbucket.org/ariya/phantomjs/downloads/%s" % (PHANTOMJS_TARBALL_BASENAME,)
        sh.mkdir("-p", PHANTOMJS_PATH, **LOUD)
        if not os.path.exists(PHANTOMJS_TARBALL):
            sh.wget(PHANTOMJS_URL, output_document=PHANTOMJS_TARBALL, **LOUD)
        sh.tar("xj", directory=PHANTOMJS_PATH, file=PHANTOMJS_TARBALL, **LOUD)
        sh.ln("-sf", os.path.join(PHANTOMJS_PATH, PHANTOMJS_BASENAME, "bin", "phantomjs"),
              "/usr/local/bin/phantomjs", **LOUD)

    # Recreate the virtualenv directory owned by the current user.
    with sh.sudo:
        sh.rm("-rf", VENV_PATH, **LOUD)
        sh.mkdir("-p", VENV_PATH, **LOUD)
        sh.chown("{}:{}".format(os.getuid(), os.getgid()), VENV_PATH, **LOUD)

    sh.virtualenv(VENV_PATH, **LOUD)

    # Add the ./tools and ./scripts/setup directories inside the repository root to
    # the system path; we'll reference them later.
    orig_path = os.environ["PATH"]
    os.environ["PATH"] = os.pathsep.join((
            os.path.join(ZULIP_PATH, "tools"),
            os.path.join(ZULIP_PATH, "scripts", "setup"),
            orig_path
    ))


    # Put Python virtualenv activation in our .bash_profile.
    with open(os.path.expanduser('~/.bash_profile'), 'w+') as bash_profile:
        bash_profile.writelines([
            "source .bashrc\n",
            "source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),),
        ])

    # Switch current Python context to the virtualenv.
    activate_this = os.path.join(VENV_PATH, "bin", "activate_this.py")
    execfile(activate_this, dict(__file__=activate_this))

    sh.pip.install(requirement=os.path.join(ZULIP_PATH, "requirements.txt"), **LOUD)

    with sh.sudo:
        sh.cp(REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH, **LOUD)

    # npm install and management commands expect to be run from the root of the project.
    os.chdir(ZULIP_PATH)

    sh.npm.install(**LOUD)

    os.system("tools/download-zxcvbn")
    os.system("tools/emoji_dump/build_emoji")
    os.system("generate_secrets.py -d")
    # CI/container environments need their services restarted first.
    if "--travis" in sys.argv:
        os.system("sudo service rabbitmq-server restart")
        os.system("sudo service redis-server restart")
        os.system("sudo service memcached restart")
    elif "--docker" in sys.argv:
        os.system("sudo service rabbitmq-server restart")
        os.system("sudo pg_dropcluster --stop 9.3 main")
        os.system("sudo pg_createcluster -e utf8 --start 9.3 main")
        os.system("sudo service redis-server restart")
        os.system("sudo service memcached restart")
    sh.configure_rabbitmq(**LOUD)
    sh.postgres_init_dev_db(**LOUD)
    sh.do_destroy_rebuild_database(**LOUD)
    sh.postgres_init_test_db(**LOUD)
    sh.do_destroy_rebuild_test_database(**LOUD)
    return 0
Exemple #46
0
def main():
    """Provision a Zulip development environment (trusty, amd64 only).

    Installs apt dependencies, the tsearch data package, PhantomJS,
    builds a Python virtualenv, runs npm install and initializes the
    dev/test databases; supports a --travis mode.  Python 2 code.
    """
    log = logging.getLogger("zulip-provisioner")
    # TODO: support other architectures
    if platform.architecture()[0] == '64bit':
        arch = 'amd64'
    else:
        log.critical("Only amd64 is supported.")
        # Bug fix: without stopping here, `arch` is never bound and the
        # .deb URL format below raises NameError.  Exit like the
        # multi-arch provisioner variant does.
        raise SystemExit(1)

    vendor, version, codename = platform.dist()

    if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
        log.critical("Unsupported platform: {} {}".format(vendor, codename))

    with sh.sudo:
        sh.apt_get.update(**LOUD)

        sh.apt_get.install(*APT_DEPENDENCIES["trusty"], assume_yes=True, **LOUD)

    temp_deb_path = sh.mktemp("package_XXXXXX.deb", tmpdir=True)

    # Fetch and install the tsearch dictionary package for this arch.
    sh.wget(
        "{}/{}_{}_{}.deb".format(
            TSEARCH_URL_BASE,
            TSEARCH_PACKAGE_NAME["trusty"],
            TSEARCH_VERSION,
            arch,
        ),
        output_document=temp_deb_path,
        **LOUD
    )

    with sh.sudo:
        sh.dpkg("--install", temp_deb_path, **LOUD)

    # Install PhantomJS under /srv and symlink it into the PATH.
    with sh.sudo:
        PHANTOMJS_PATH = "/srv/phantomjs"
        PHANTOMJS_TARBALL = os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64.tar.bz2")
        sh.mkdir("-p", PHANTOMJS_PATH, **LOUD)
        sh.wget("https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-1.9.8-linux-x86_64.tar.bz2",
                output_document=PHANTOMJS_TARBALL, **LOUD)
        sh.tar("xj", directory=PHANTOMJS_PATH, file=PHANTOMJS_TARBALL, **LOUD)
        sh.ln("-sf", os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64", "bin", "phantomjs"),
              "/usr/local/bin/phantomjs", **LOUD)

    # Recreate the virtualenv directory owned by the current user.
    with sh.sudo:
        sh.rm("-rf", VENV_PATH, **LOUD)
        sh.mkdir("-p", VENV_PATH, **LOUD)
        sh.chown("{}:{}".format(os.getuid(), os.getgid()), VENV_PATH, **LOUD)

    sh.virtualenv(VENV_PATH, **LOUD)

    # Add the ./tools and ./scripts/setup directories inside the repository root to
    # the system path; we'll reference them later.
    orig_path = os.environ["PATH"]
    os.environ["PATH"] = os.pathsep.join((
            os.path.join(ZULIP_PATH, "tools"),
            os.path.join(ZULIP_PATH, "scripts", "setup"),
            orig_path
    ))


    # Put Python virtualenv activation in our .bash_profile.
    with open(os.path.expanduser('~/.bash_profile'), 'w+') as bash_profile:
        bash_profile.writelines([
            "source .bashrc\n",
            "source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),),
        ])

    # Switch current Python context to the virtualenv.
    activate_this = os.path.join(VENV_PATH, "bin", "activate_this.py")
    execfile(activate_this, dict(__file__=activate_this))

    sh.pip.install(requirement=os.path.join(ZULIP_PATH, "requirements.txt"), **LOUD)

    with sh.sudo:
        sh.cp(REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH, **LOUD)

    # npm install and management commands expect to be run from the root of the project.
    os.chdir(ZULIP_PATH)

    sh.npm.install(**LOUD)

    os.system("tools/download-zxcvbn")
    os.system("tools/emoji_dump/build_emoji")
    os.system("generate_secrets.py -d")
    # Travis CI needs its services restarted before DB initialization.
    if "--travis" in sys.argv:
        os.system("sudo service rabbitmq-server restart")
        os.system("sudo service redis-server restart")
        os.system("sudo service memcached restart")
    sh.configure_rabbitmq(**LOUD)
    sh.postgres_init_dev_db(**LOUD)
    sh.do_destroy_rebuild_database(**LOUD)
    sh.postgres_init_test_db(**LOUD)
    sh.do_destroy_rebuild_test_database(**LOUD)
Exemple #47
0
def main(args):
    """Called below by __main__.

    Runs the bioBakery whole-metagenome-shotgun (wmgx) workflow inside a
    Docker container, then — if there are at least 3 samples — the
    visualization workflow (wmgx_vis).  Exits the process with 0 on
    success, 1 on failure.
    """
    container_name = 'biobakery/nephele2'
    exit_status = 0

    # Bug fix: construct the pipeline object *before* the try block.
    # If the constructor raised inside it, the except/finally handlers
    # below would hit a NameError on `pipe` and mask the real error.
    pipe = BiobakeryPipe(args)

    try:
        # read in mapping file
        samples = pipe.gen_samples(args.map_file.name, args.data_type)

        # check for paired end data and merge files, if needed
        if pipe.args.data_type == 'WGS_PE':
            pipe.log.info('Renaming paired end files.')
            inputs_dir = pipe.rename_paired_end_files(pipe.inputs_dir, pipe.outputs_dir, samples,
                                                      pipe.args.file_ext)
        else:
            inputs_dir = pipe.inputs_dir

        pipe.log.info('Inputs directory: ' + inputs_dir)

        pipe.log.info('Running Whole Metagenome Shotgun Workflow (wmgx).')
        cmnd = pipe.gen_wmgx_cmd(inputs_dir, pipe.outputs_dir, pipe.args.strainphlan, pipe.args.threads,
                                 pipe.args.file_ext, pipe.args.local_jobs, pipe.args.keep)
        docker_cmnd = pipe.gen_docker_cmnd(cmnd, pipe.base_dir, container_name)
        pipe.log.info(docker_cmnd)
        pipe.exec_docker_cmnd(docker_cmnd)

        # check we have at least 3 samples to run wmgx_vis
        if len(samples) > 2:
            # make the visualization outputs directory
            pipe.log.info('Create wmgx_vis output directory: ' +
                          pipe.visoutputs_dir)
            sh.mkdir('-p', pipe.visoutputs_dir)
            sh.chmod('777', pipe.visoutputs_dir)
            sh.chown('www-data:www-data', pipe.visoutputs_dir)
            # os.makedirs(pipe.visoutputs_dir, mode=0o777, exist_ok=True)

            pipe.log.info(
                'Checking output files from wmgx workflow that are required by wmgx_vis workflow.')
            pipe.check_wmgx_outputs(pipe.outputs_dir)

            pipe.log.info(
                'Running Visualization for Whole Metagenome Shotgun Workflow (wmgx_vis).')
            cmnd = pipe.gen_wmgx_vis_cmd(pipe.outputs_dir, pipe.visoutputs_dir,
                                         pipe.args.project_name, pipe.args.strainphlan,
                                         pipe.args.threads)

            docker_cmnd = pipe.gen_docker_cmnd(
                cmnd, pipe.base_dir, container_name)
            pipe.log.info(docker_cmnd)
            pipe.exec_docker_cmnd(docker_cmnd)

            pipe.log.info('Checking output files from wmgx_vis pipeline.')
            pipe.check_wmgx_vis_outputs(pipe.visoutputs_dir)
        else:
            pipe.log.info('The Visualization for Whole Metagenome Shotgun Workflow '
                          '(wmgx_vis) will not be run, as at least 3 samples are needed.')

        pipe.log.info('bioBakery WGS pipeline done.')

    except pipeline_error.NecessaryFileNotFound as pp_err:
        pipe.log.error('Pipeline Error:')
        pipe.log.error(pp_err)
        pipe.log.error(
            'A step in the biobakery workflows may have failed. Check anadama.log files.')
        pipe.log.error('')
        exit_status = 1

    except Exception:
        pipe.log.error('Error:')
        pipe.log.error(traceback.format_exc())
        exit_status = 1

    finally:
        if not pipe.args.keep:
            try:
                pipe.log.info('Cleaning up intermediate files.')
                cleanup_files_log = pipe.cleanup_files(pipe.outputs_dir)
                pipe.log.info(cleanup_files_log)
            except Exception:
                pipe.log.error('Error:')
                pipe.log.error(traceback.format_exc())
                exit_status = 1
        # raise SystemExit directly instead of the site-module exit()
        # helper, which is intended for interactive use and may be
        # absent when scripts run with site disabled.
        raise SystemExit(exit_status)
Exemple #48
0
 def Git(self):
     """Create the user's bare git repository and hand ownership to the
     configured git user."""
     from sh import chown, git
     bare_repo = self.user_gitrepo
     git('init', '--bare', bare_repo)
     chown('-R', Config.GIT_USER, bare_repo)
Exemple #49
0
def StartWeb(**user):
    """Provision a complete web-hosting account for one user.

    Expected keyword arguments: ``name``, ``passwd``, ``time`` (subscription
    length in months), ``service``, ``email``, ``network``, ``enable_svn``,
    ``enable_git``.

    Creates the user's storage (FTP-only, SVN, or Git — SVN and Git are
    mutually exclusive), starts a Docker container for the chosen service,
    records the account in Redis, and e-mails the credentials to the user.

    Calls ``exit()`` if both SVN and Git are requested, and ``sys.exit(7)``
    when the Redis server cannot be reached.
    """
    # NOTE(review): **user always binds to a dict, so this guard can never
    # fire; kept as-is to preserve behaviour.
    if not isinstance(user, (dict)):
        raise TypeError('StartAll need a dict(user).')

    # Unpack the account fields; `time` is coerced to str here and back to
    # int where it is interpolated below.
    name, passwd, time, service, email = user['name'], user['passwd'], str(
        user['time']), user['service'], user['email']
    dn = name + Config.DN_BASE
    # Resolve the host port and Docker image for the requested service type
    # (both are used when the container is created further down).
    PORT, image = Public.Handler(service)
    userhome = os.path.join(Config.SDP_USER_DATA_HOME, name)

    #docker network mode is 'bridge' or 'host'(not allow none and ContainerID)
    if user['network'] != None:
        docker_network_mode = user['network']
    else:
        if Config.DOCKER_NETWORK not in ['bridge', 'host']:
            raise TypeError('Unsupport docker network mode')
        else:
            docker_network_mode = Config.DOCKER_NETWORK

    #define other code type, default is only ftp, disable svn and git, but SVN and git can't exist at the same time!
    # Any non-None value (even falsy ones) enables SVN / Git below.
    if user['enable_svn'] != None:
        enable_svn = True
    else:
        enable_svn = False

    svn_repo = Config.SVN_ADDR + name
    """
    Git是什么?
    Git是目前世界上最先进的分布式版本控制系统(没有之一)。
    Git有什么特点?简单来说就是:高端大气上档次!
    The Git feature that really makes it stand apart from nearly every other SCM out there is its branching model.
    Git allows and encourages you to have multiple local branches that can be entirely independent of each other. The creation, merging, and deletion of those lines of development takes seconds.
    """
    if user['enable_git'] != None:
        enable_git = True
        git_repo = 'git@' + Config.GIT_SVR + ':' + os.path.join(
            Config.GIT_ROOT, name) + '.git'
    else:
        enable_git = False

    #make repo info, make a choice
    # Build the human-readable repository summary for the notification
    # messages; SVN + Git together is rejected outright.
    if enable_svn == False:
        if enable_git == False:
            repos = "None"
        else:
            repos = "git=> " + git_repo
    if enable_svn == True:
        if enable_git == True:
            #repos = "svn=> " + svn_repo, "git=> "+git_repo
            print "\033[0;31;40mSorry...You have to make a choice between SVN and git, and you don't have to choose all of the options.\033[0m"
            exit()
        else:
            repos = "svn=> " + svn_repo

    # Plain-text credential summary mailed to the user at the end.
    userinfo_user = r'''
Dear %s, 以下是您的SdpCloud服务使用信息!
账号: %s
密码: %s
使用期: %d个月
服务类型: %s
验证邮箱: %s
用户域名: %s
版本库信息: %s

更多使用方法请查询官方文档(www.saintic.com),祝您使用愉快。 如果有任何疑惑,欢迎与我们联系:
邮箱: [email protected]
官网: http://www.saintic.com/
问题: https://github.com/saintic/Sdp/issues''' % (name, name, passwd, int(time),
                                                service, email, dn, str(repos))

    # Placeholder index.html written into the fresh web root / repository.
    userinfo_welcome = r'''<!DOCTYPE html>
<html>
<head>
<title>User information for SdpCloud!</title>
</head>
<body>
<h1><center>Welcome %s:</center></h1>
<p>账号: %s</p>
<p>密码: %s</p>
<p>使用期: %d个月</p>
<p>服务类型: %s</p>
<p>验证邮箱: %s</p>
<p>用户域名: %s</p>
<p>版本库信息: %s</p>

<p>这是一个欢迎页面,请尽快使用FTP、SVN或Git覆盖此页面!</p>
<p><em>Thank you for using SdpCloud.</em></p>
</body>
</html>''' % (name, name, passwd, int(time), service, email, dn, str(repos))

    # Account record persisted to Redis via hashset() once the container is
    # up; "repo" keeps only the address part of the summary string.
    userinfo_admin = {
        "name": name,
        "passwd": passwd,
        "time": int(time),
        "service": service,
        "email": email,
        "image": image,
        "ip": "127.0.0.1",
        "port": int(PORT),
        "dn": dn,
        "userhome": userhome,
        "repo": str(repos.split()[-1]),
        "expiretime": Public.Time(m=time),
        "network": docker_network_mode
    }

    #define instances for writing redis and sending email.
    rc = Redis.RedisObject()
    ec = Mail.SendMail()
    if rc.ping():
        import Source
        from sh import svn, chmod, chown
        Code = Source.CodeManager(**userinfo_admin)
        Code.ftp()

        # FTP-only account: seed the web root with the welcome page and hand
        # ownership to the virtual FTP user (sticky bit on contents).
        if enable_svn == False and enable_git == False:
            os.mkdir(userhome)
            with open(os.path.join(userhome, 'index.html'), 'w') as f:
                f.write(userinfo_welcome)
            chown('-R', Config.FTP_VFTPUSER + ':' + Config.FTP_VFTPUSER,
                  userhome)
            chmod('-R', 'a+t', userhome)

        if enable_svn == True:
            Code.CreateApacheSvn()
            Code.initSvn(userinfo_welcome)

        if enable_git == True:
            Code.Git()
            Code.initGit(userinfo_welcome)

        # Configure the reverse proxy, then launch the service container with
        # its web port bound to the chosen loopback port.
        Code.Proxy()
        Dk = Docker.Docker(
            **{
                "image": image,
                "name": name,
                "port": Config.PORTNAT['web'],
                "bind": ('127.0.0.1', PORT),
                "volume": userhome
            })
        cid = Dk.Create(mode=docker_network_mode)
        Dk.Start(cid)
        userinfo_admin["container"] = cid
        rc.hashset(**userinfo_admin)
        ec.send(name, email, userinfo_user)
    else:
        #raise an error for RedisConnectError(Error.py)
        print "\033[0;31;40mConnect Redis Server Error,Quit.\033[0m"
        sys.exit(7)
Exemple #50
0
    def __init__(self,
                 setting_scope: SettingScopeEnum = SettingScopeEnum.site):
        """Protects from overriding on initialisation.

        Resolves the credentials directory and the google/mqtt settings file
        paths for the requested scope and stores them as class attributes on
        ``HeimdallrSettings`` (shared by all instances).

        :param setting_scope: which configuration root to use
            (user / site / root).
        :raises ValueError: if *setting_scope* is not a recognised scope.
        """
        # TODO: FIGURE OUT A WAY TO EASILY COPY SETTINGS TO ROOT; for services
        # NOTE(review): assigned to a throwaway local and never used —
        # probably meant to be stored on self or the class; kept as-is to
        # preserve behaviour.
        _setting_scope = setting_scope

        def _bind_privileged(config_dir) -> None:
            # Shared logic for the site/root scopes: create the credentials
            # directory via sudo if it is missing, then point the class-level
            # settings paths at *config_dir*.
            prev_val = PROJECT_APP_PATH._ensure_existence
            PROJECT_APP_PATH._ensure_existence = False
            HeimdallrSettings._credentials_base_path = (
                config_dir / "credentials")
            if not HeimdallrSettings._credentials_base_path.exists():
                with sh.contrib.sudo(
                        password=getpass.getpass(
                            prompt=f"[sudo] password for {getpass.getuser()}: "
                        ),
                        _with=True,
                ):
                    sh.mkdir(["-p", HeimdallrSettings._credentials_base_path])
                    # A colon with no group name after the user makes chown
                    # use that user's login group.
                    sh.chown(f"{getpass.getuser()}:", config_dir)
            PROJECT_APP_PATH._ensure_existence = prev_val
            HeimdallrSettings._google_settings_path = str(
                ensure_existence(config_dir) / "google.settings")
            HeimdallrSettings._mqtt_settings_path = str(
                ensure_existence(config_dir) / "mqtt.settings")

        if setting_scope == SettingScopeEnum.user:
            # Per-user config needs no elevated privileges; just ensure the
            # directories exist.
            HeimdallrSettings._credentials_base_path = ensure_existence(
                PROJECT_APP_PATH.user_config / "credentials")
            HeimdallrSettings._google_settings_path = str(
                ensure_existence(PROJECT_APP_PATH.user_config) /
                "google.settings")
            HeimdallrSettings._mqtt_settings_path = str(
                ensure_existence(PROJECT_APP_PATH.user_config) /
                "mqtt.settings")
        elif setting_scope == SettingScopeEnum.site:
            _bind_privileged(PROJECT_APP_PATH.site_config)
        elif setting_scope == SettingScopeEnum.root:
            _bind_privileged(PROJECT_APP_PATH.root_config)
        else:
            raise ValueError()