Example #1
    def push(self):
        docker_image = self.env.data['DOCKER_IMAGE']

        if self.args.dry_run:
            self.logger.info(f'docker push {docker_image}')
        else:
            sh.docker('push', docker_image, _fg=True)
Example #2
 def docker(self, *args, **kwargs):
     """Perform a docker command remotely, using SSH to access the
     remote docker daemon.
     """
     if self.is_localhost():
         return docker(*args, **kwargs)
     return docker("--host", f"ssh://{self.node_address}", *args, **kwargs)
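A minimal standalone sketch of the same idea, outside the class: route a docker command to a remote daemon over SSH, or run it against the local daemon. The remote_host value and the --format flag are illustrative assumptions, not part of the snippet above.

from sh import docker

remote_host = "10.0.0.5"  # hypothetical address of a machine running dockerd

# Remote: equivalent to the non-localhost branch of the wrapper above.
print(docker("--host", f"ssh://{remote_host}", "ps", "--format", "{{.Names}}"))

# Local: equivalent to the is_localhost() branch.
print(docker("ps", "--format", "{{.Names}}"))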
Example #3
def test_two_images():
    sh.docker(sh.cat('empty.tar'), 'import', '-', 'footest')
    sh.docker('build', '-t', 'bartest', '.')
    f = StringIO()
    Docktree(restrict='footest', file=f).draw_tree()
    assert re.match(
        u'└─ sha256:[a-f0-9]{5} footest:latest\n' +
        u'   └─ sha256:[a-f0-9]{5} bartest:latest\n', f.getvalue())
Example #4
def exit1(usrID, proID):
    try:
        sh.docker("stop", proID + usrID)  # stop the container
        return {"status": "succeed"}
    except Exception as e:
        error["error"] = str(e)
        # print(error)
        return error
Example #5
def load_config(name: str, path: str) -> None:
    """
    Loads config into swarm
    """
    if name in get_configs():
        remove_config(name)

    sh.docker('config', 'create', name, path)
Example #6
def test_two_images():
    sh.docker(sh.cat('empty.tar'), 'import', '-', 'footest')
    sh.docker('build', '-t', 'bartest', '.')
    f = StringIO()
    Docktree(restrict='footest', file=f).draw_tree()
    assert re.match(
        u'└─ [a-f0-9]{12} footest:latest\n' +
        u'   └─ [a-f0-9]{12} bartest:latest\n', f.getvalue())
Example #7
 def _image_is_right(self):
     """Check whether the image can be pulled."""
     try:
         logger.warning(f"The `{self.images_name}` image is ready to download...")
         sh.docker('pull', self.images_name)
         logger.info(f"The `{self.images_name}` image is exist in docker repo.")
     except sh.ErrorReturnCode_1:
         logger.error(f"The `{self.images_name}` image is not found in docker repo.")
         sys.exit(1)
Example #8
def postgres_server(unmigrated_postgres_server, docker_image):
    sh.docker('run', '--rm', '--link',
              '{}:db'.format(unmigrated_postgres_server), docker_image,
              'alembic', '-c', 'dila/alembic.ini', 'upgrade', 'head')
    yield unmigrated_postgres_server
    sh.docker('run', '--rm', '--net',
              'container:{}'.format(unmigrated_postgres_server), '-e',
              'POSTGRES_USER=dila', '-e', 'POSTGRES_PASSWORD=dila', 'postgres',
              'psql', '-h', 'localhost', '-U', 'dila', '-c',
              'DROP SCHEMA public CASCADE; CREATE SCHEMA public;')
Example #9
def exit2(usrID, proID):
    try:
        sh.docker("stop", proID + usrID)
        sh.docker("rm", proID + usrID)  # stop and remove the container
        print("hahahahahahha")
        return {"status": "succeed"}
    except Exception as e:
        error["error"] = str(e)
        # print(error)
        return error
Example #10
def exit2(usrID, proID):
    try:
        sh.docker("stop", proID + usrID)
        sh.docker("rm", proID + usrID)  # stop and remove the container
        sh.rm("-rf", "/home/" + usrID)
        return {"status": "succeed"}
    except Exception as e:
        error["error"] = str(e)
        # print(error)
        return error
Example #11
def docker_save(cont_dic):
    for c in cont_dic:
        c_nick = c[1]
        #write images to a temporary directory and combine them into a single archive
        c_full_path = '{0}{1}_{2}.tar'.format(BACKUP_TEMP_DIR, c_nick, DATE)
        print(
            '\tdocker save: (this will take awhile...) \n\t\tfile: {0} <- image: {1}'
            .format(c_full_path, c_nick))
        archives.append(c_full_path)
        sh.docker('save', '-o', c_full_path, c_nick)
Example #12
 def delete(self):
     try:
         if self._is_attach_to_container:
             docker('exec', self._attach_to_container_id, 'ip', 'link',
                    'del', self._veth_name)
         else:
             ip('link', 'del', self._veth_name)
     except Exception as e:
         raise VEthDeleteException("delete macvlan eth error:\n{}".format(
             e.message))
Example #13
 def up(self):
     try:
         if self._is_attach_to_container:
             docker('exec', self._attach_to_container_id, 'ip', 'link',
                    'del', self._veth_name)
         else:
             ip('link', 'set', self._veth_name, 'up')
     except Exception as e:
         raise VEthUpException("up macvlan eth error:\n{}".format(
             e.message))
     return self
Example #14
def unmigrated_postgres_server():
    container_name = 'acceptance_test_dila_postgres'
    sh.docker('run', '-d', '-e', 'POSTGRES_USER=dila', '-e',
              'POSTGRES_PASSWORD=dila', '--name', container_name, 'postgres')
    log = sh.docker('logs', '-f', container_name, _iter=True, _ok_code=2)
    for line in log:
        if 'PostgreSQL init process complete; ready for start up.' in line:
            break
    log.terminate()
    yield container_name
    sh.docker('rm', '-fv', container_name)
Example #15
def postgres_server():
    container_name = 'test_dila_postgres'
    sh.docker('run', '-d', '--name', container_name, 'postgres')
    log = sh.docker('logs', '-f', container_name, _iter=True, _ok_code=2)
    for line in log:
        if 'PostgreSQL init process complete; ready for start up.' in line:
            break
    time.sleep(1)
    log.terminate()
    yield container_name
    sh.docker('rm', '-fv', container_name)
Example #16
def selenium_server():
    container_name = 'test_dila_selenium'
    sh.docker('run', '-d', '--name', container_name,
              'selenium/standalone-firefox')
    log = sh.docker('logs', '-f', container_name, _iter=True, _ok_code=2)
    for line in log:
        if 'Selenium Server is up and running' in line:
            break
    log.terminate()
    yield container_name
    sh.docker('rm', '-fv', container_name)
Example #17
def running_server(postgres_server, ldap_server, docker_image):
    container_name = 'test_dila'
    sh.docker('run', '-d', '--name', container_name, '--link',
              '{}:db'.format(postgres_server), '--link',
              '{}:ldap'.format(ldap_server), docker_image)
    log = sh.docker('logs', '-f', container_name, _iter='err', _ok_code=2)
    for line in log:
        if 'Running on http://0.0.0.0:80/' in line:
            break
    log.terminate()
    yield container_name
    sh.docker('rm', '-fv', container_name)
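Examples #14 through #17 share one readiness idiom: follow the container's logs with _iter=True, break as soon as a marker line appears, then terminate the follower. A generalized sketch of that idiom (the helper name is invented here, not taken from the source):

import sh

def wait_for_log_line(container_name, marker):
    # Lazily stream `docker logs -f`; _ok_code=2 treats an exit status of 2
    # as success, mirroring the fixtures above.
    log = sh.docker('logs', '-f', container_name, _iter=True, _ok_code=2)
    for line in log:
        if marker in line:
            break
    log.terminate()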
Example #18
def cleanup_inside(name):
    """
    Clean the inside of a container by deleting the containers and images within it.
    """
    docker("exec", "-t", name, "bash", "-c",
           "docker rm -f $(docker ps -qa) ; docker rmi $(docker images -qa)",
           _ok_code=[0,
                     1,  # Caused by 'docker: "rm" requires a minimum of 1 argument.' et al.
                     127,  # Caused by '"docker": no command found'
                     255,  # Caused by '"bash": executable file not found in $PATH'
                    ]
          )
Example #19
def main():
    arguments = docopt(__doc__, version=dockerfly_version)
    docker_cli = dockerpy.Client(base_url='unix://var/run/docker.sock')

    container_json_exp = [{
            'image_name': 'centos:centos6',
            'run_cmd': '/bin/sleep 300',
            'eths':
            [
               ('testDockerflyv0', 'eth0', '192.168.159.10/24'),
               ('testDockerflyv1', 'eth0', '192.168.159.11/24'),
            ],
            'gateway': '192.168.159.2',
            'container_name': None,
            'status': 'stopped',
            'last_modify_time': 0,
            'id': 0,
            'pid': 0,
        }]

    if arguments['ps']:
        print docker('ps')

    if arguments['gen']:
        with open(arguments['<config_json>'], 'w') as config:
            json.dump(container_json_exp, config, indent=4, encoding='utf-8')

    if arguments['run']:
        with open(arguments['<config_json>'], 'r') as config:
            container_json = json.load(config, encoding='utf-8')
            for container in container_json:
                container_id = Container.run(container['image_name'],
                                             container['run_cmd'],
                                             container['eths'],
                                             container['gateway']
                                        )
                print "Container running:ContainerId(%s) Pid(%s)" %(container_id,
                                 docker_cli.inspect_container(container_id)['State']['Pid']
                        )

    if arguments['rm']:
        Container.remove(arguments['<container_id>'])

    if arguments['resize']:
        Container.resize(arguments['<container_id>'], arguments['<new_size>'])

    if arguments['getpid']:
        print docker_cli.inspect_container(arguments['<container_id>'])['State']['Pid']

    if arguments['getpid']:
        print docker_cli.inspect_container(arguments['<container_id>'])['State']['Pid']
        print "run dockerflyd server %s:%s" % (arguments['<ip>'], arguments['<port>'])
        rundaemon(arguments['<ip>'], arguments['<port>'])
Example #20
def etcd_srv(ip, domain, name):
    sh.docker('run', '-d', '--name', 'etcd',
        '-p', '2379:2379', '-p', '2380:2380',
        '-v', '/etc/ssl/certs/:/etc/ssl/certs',
        ETCD, '--name', "{}.{}".format(name, domain),
        '--advertise-client-urls', 'http://{}:2379'.format(ip),
        '--initial-advertise-peer-urls', 'http://{}.{}:2380'.format(name,domain),
        '--listen-peer-urls', 'http://0.0.0.0:2380'.format(name,domain),
        '--listen-client-urls', 'http://0.0.0.0:2379',
        '--discovery-srv', domain,
        '--initial-cluster-state', 'new',
    )
Example #21
 def docker_buil(self):
     sh.docker('build',
               '-t',
               self.images_tag,
               '-f',
               'Dockerfile',
               '--build-arg',
               'deploy_env=%s' % self.env,
               '--build-arg',
               'code_name=%s' % self.package_file,
               '.',
               _out=process_output)
Example #22
File: swarm.py Project: bholt/ipa
def stop(args=None, opt=None):
    
    for host in hosts:
        try:
            containers = docker(host).ps("-aq").stdout.split()
            if len(containers) > 0:
                docker(host).stop(*containers)
                docker(host).rm(*containers)
        except sh.ErrorReturnCode_1:
            puts("#{colored.yellow('[warning]')} no docker running on #{host}")
        
        on(host).sudo.pkill("-f", "[d]ocker.*tcp://", _ok_code=[0,1])
    
    on(MASTER).sh(c="docker stop consul; docker rm consul", _ok_code=[0,1])
Example #23
 def _image_is_right(self):
     """Check whether the image can be pulled."""
     try:
         print(
             f"[P_INFOS] The `{self.images_name}` image is ready to download ..."
         )
         sh.docker('pull', self.images_name)
         print(
             f"[P_INFOS] The `{self.images_name}` image is exist in docker repository."
         )
     except sh.ErrorReturnCode_1:
         print(
             f"[P_ERROR] The `{self.images_name}` image is not found in docker repository."
         )
         sys.exit(1)
Example #24
def docker_commit(cont_dic):
    for c in cont_dic:
        c_name = c[0]
        c_nick = c[1]
        try:
            sh.docker('image', 'rm', c_nick)
            print('\tremoved image: {0} while commiting new image'.format(
                c_nick))
        except:
            pass
        print('\tdocker commit: container: {0} to image: {1}'.format(
            c_name, c_nick))
        sh.docker('commit', '-p', c_name, c_nick)
    print()
    print(sh.docker('images'))
Example #25
def get_configs() -> list:
    """
    Returns a list of config names found in the swarm
    """
    configs = sh.docker('config', 'ls', '--format', '{{ .Name }}')

    return configs.stdout.decode('utf8').splitlines()
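Together with load_config from Example #5, the same two commands form an idempotent create-or-replace step for swarm configs. A rough sketch, using an invented config name and path:

import sh

name, path = 'app_settings', './settings.yml'  # hypothetical values
existing = sh.docker('config', 'ls', '--format', '{{ .Name }}')
if name in existing.stdout.decode('utf8').splitlines():
    sh.docker('config', 'rm', name)  # drop the stale config first
sh.docker('config', 'create', name, path)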
Example #26
 async def post(self):
     post_data = self.request.body.decode('utf-8')
     post_data = json.loads(post_data)
     containerIP = str(
         sh.docker("inspect", "-f", "{{.NetworkSettings.IPAddress}}",
                   str(post_data["proID"]) + post_data["usrID"])).replace(
                       '\n', '').replace('\r', '')
     token = projectHandler.ip_search[containerIP]
     del projectHandler.ip_search[containerIP]
     del projectHandler.ip_map[token]
     # print(post_data)
     if post_data["exec"] == "exit1":
         send_data = docker_commands.exit1(post_data["usrID"],
                                           str(post_data["proID"]))
         ws = docker_websocketHandler.clients[post_data["usrID"]]
         del docker_websocketHandler.clients[post_data["usrID"]]
         ws.close()
     elif post_data["exec"] == "exit2":
         send_data = docker_commands.exit2(post_data["usrID"],
                                           str(post_data["proID"]))
         ws = docker_websocketHandler.clients[post_data["usrID"]]
         del docker_websocketHandler.clients[post_data["usrID"]]
         ws.close()
         openHandler.password[post_data["usrID"]] = 0
     else:
         send_data = {"error": "no such execution"}
     self.write(json.dumps(send_data))
Example #27
def findImage(repository, tag):
    container=""
    try:
        container = sh.awk("{print $3}", _in=sh.head("-n 1", _in=sh.grep(tag, _in=sh.docker("images", repository))))
    except:
        print "container not available"
    return container.rstrip()
Example #28
def findContainer(name):
    container=""
    try:
        container = sh.awk("{print $1}", _in=sh.head("-n 1", _in=sh.grep(name, _in=sh.docker("ps", "-a"))))
    except:
        print "container not available"
    return container.rstrip()
Example #29
 def docker_image(self):
     _images_info = sh.docker('images',
                              '--filter=reference=%s' % self.images_tag)
     _img = list(_images_info)[1]
     _img_list = list(filter(None, _img.split(" ")))
     _img_tag = "%s:%s" % (_img_list[0], _img_list[1])
     return _img_tag
Example #30
 def run(self, *args, **kwargs):
     if self.run_can_be_waited(*args, **kwargs):
         process: sh.RunningCommand = sh.docker(*args,
                                                _env=self.sh_env,
                                                _bg=True,
                                                **kwargs)
         try:
             process.wait()
         except KeyboardInterrupt as e:
             logger.info("Stopping running command...")
             process.signal(signal.SIGINT)
             raise e
     else:
         process: sh.RunningCommand = sh.docker(*args,
                                                _env=self.sh_env,
                                                **kwargs)
     return process
Example #31
def docker_execute(command_list, logger):
    """ Run and tail a docker command. """
    import sh
    try:
        running_command = sh.docker(command_list, _iter=True)
        for line in running_command:
            logger.info(line.strip())
    except KeyboardInterrupt:
        logger.info("Requested to terminate running docker command.")
        running_command.process.terminate()
Example #32
def docker_execute(command_list, logger):
    """ Run and tail a docker command. """
    import sh
    try:
        running_command = sh.docker(command_list, _iter=True)
        for line in running_command:
            logger.info(line.strip())
    except KeyboardInterrupt:
        logger.info("Requested to terminate running docker command.")
        running_command.process.terminate()
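A possible call site for docker_execute, assuming a standard logging logger; the image name is arbitrary:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger('docker')

# Stream `docker pull alpine` line by line through the logger; Ctrl-C is
# caught by the handler above, which terminates the docker process.
docker_execute(['pull', 'alpine'], log)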
Example #33
def run_docker_dev_test(path, coverage=False):
    """
    Method to check that docker runs with dev.yml
    """
    try:
        # build django, power up the stack and run the test
        sh.docker_compose("--file", "{}/dev.yml".format(path), "build",
                          "django")
        sh.docker_compose("--file", "{}/dev.yml".format(path), "build")
        if coverage:
            sh.docker_compose("--file", "{}/dev.yml".format(path), "run",
                              "django", "coverage", "run", "manage.py", "test")
            sh.docker_compose("--file", "{}/dev.yml".format(path), "run",
                              "django", "coverage", "xml", "-o",
                              "coverage.xml")
            shutil.copyfile(os.path.join(str(path), ".coverage"),
                            os.path.join(PROJECT_DIR, ".coverage"))
            shutil.copyfile(os.path.join(str(path), "coverage.xml"),
                            os.path.join(PROJECT_DIR, "coverage.xml"))
        else:
            sh.docker_compose("--file", "{}/dev.yml".format(path), "run",
                              "django", "python", "manage.py", "test")

        # test that the development server is running
        sh.docker_compose("--file", "{}/dev.yml".format(path), "up", "-d")
        time.sleep(10)
        curl = sh.curl("-I", "http://localhost:8000/")
        assert "200 OK" in curl
        assert "Server: Werkzeug" in curl

        # since we are running a lot of tests with different configurations,
        # we need to clean up the environment. Stop all running containers,
        # remove them and remove the postgres_data volume.
        sh.docker_compose("--file", "{}/dev.yml".format(path), "stop")
        sh.docker_compose("--file", "{}/dev.yml".format(path), "rm", "-f")
        sh.docker("volume", "rm",
                  "cookiecuttersaastestproject_postgres_data_dev")
    except sh.ErrorReturnCode as e:
        # in case there are errors it's good to have full output of
        # stdout and stderr.
        pytest.fail("STDOUT: {} \n\n\n STDERR: {}".format(
            e.stdout.decode("utf-8"), e.stderr.decode("utf-8")))
Example #34
 def run_default(self):
     logger.info('Updating requirements for "{}"...'.format(self.image))
     sh.docker(
         'run',
         '--rm',
         '-i',
         '-u',
         '{}:{}'.format(self.uid, self.gid),
         '-e',
         'CUSTOM_COMPILE_COMMAND="kubeyard update_requirements"',
         '-e',
         'HOME=/tmp',
         *self.volumes,
         self.image,
         'bash',
         '-c',
         'freeze_requirements',
         _err=sys.stdout.buffer,
     )
     logger.info('Requirements updated!')
Example #35
def findImage(repository, tag):
    container=""
    try:
        output = sh.awk("{print $3\":\"$2}", _in=sh.docker("images", repository))
        for row in output.split('\n'):
            containerId, containerTag = row.split(':')
            if containerTag == tag:
                container = containerId
                break
    except:
        print "container not available"
    return container.rstrip()
Example #36
def main(dataset):
    tmpname="kolab/kolabtestcontainer:tmppopulated"
    imagename="kolab/kolabtestcontainer:populated-"+dataset

    SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))

    print("Building tmpcontainer...")
    docker.build("-t", tmpname, SCRIPT_DIR+"/kolabpopulated/.")
    print("Starting tmpcontainer...")
    container = docker.run("-d", "-h", settings.HOSTNAME, "-v", "/sys/fs/cgroup:/sys/fs/cgroup:ro", "-v", SCRIPT_DIR+"/kolabpopulated/"+dataset+"/:/data/", tmpname).rstrip()

    # Wait for imap to become available on imaps://localhost:993
    time.sleep(5)

    print("Running populate.sh...")
    docker("exec", container,  "/data/populate.sh", _out=process_output)

    print("Comitting results...")
    docker.commit(container, imagename)
    docker.stop(container)
    docker.rm(container)
Example #37
    def docker_run(self):
        _ps_info = sh.docker('ps', '-a', '--filter=name=%s' % self.deploy_name)
        _ps_list = list(_ps_info)
        if len(_ps_list) > 1:
            sh.docker('stop', self.project_name)
            sh.docker('rm', self.project_name)

        sh.docker('run', '-itd', '--name=%s' % self.deploy_name,
                  self.images_tag)
        time.sleep(15)
        _curl_cmd = "curl -sIL -w %{http_code} -o /dev/null http://localhost:8080/"
        _http_code = sh.docker('exec', '-i', self.deploy_name, '/bin/bash',
                               '-c', _curl_cmd)
        return _http_code
Example #38
def findImage(repository, tag):
    container = ""
    try:
        output = sh.awk("{print $3\":\"$2}",
                        _in=sh.docker("images", repository))
        for row in output.split('\n'):
            containerId, containerTag = row.split(':')
            if containerTag == tag:
                container = containerId
                break
    except:
        print "container not available"
    return container.rstrip()
Example #39
    def exec_docker_cmnd(docker_cmnd):
        if not isinstance(docker_cmnd, str):
            raise pipeline_error.NepheleBadArgumentException(
                msg='docker_exec_cmnd() only accepts strings.')

        args = shlex.split(docker_cmnd)
        try:
            docker = sh.docker(args)
        except BaseException:
            raise
        if docker.exit_code is not 0:
            raise pipeline_error.UnknownPipeError(msg=docker.stderr + "\n\n" +
                                                  args)
Example #40
File: swarm.py Project: bholt/ipa
def start(args=None, opt=None):
    # start Consul key/value store for service discovery
    # on(MASTER).sudo(fmt("sh -c 'rm -rf /scratch/consul; nohup /homes/sys/bholt/bin/consul agent -server -bootstrap -data-dir /scratch/consul -node=master -bind=#{CONSUL} -client #{CONSUL} >#{CONSUL_LOG} 2>&1 &'"))
    cmd = fmt("--name=consul -d --net=host -p 8400:8400 -p 8500:8500 -p 8600:53/udp progrium/consul -server -bootstrap -node=master -bind=#{CONSUL} -client #{CONSUL}")
    on(MASTER).docker.run(cmd.split())

    time.sleep(4)
    
    for ssh in machines:
        
        # create new bridge network so our docker can run alongside the normal one
        if ssh.ip.link.show(BRIDGE, _ok_code=[0,1]).exit_code == 1:
            ssh.sudo.brctl.addbr(BRIDGE)
            ssh.sudo.ip.addr.add('10.20.30.1/24', 'dev', BRIDGE)
            ssh.sudo.ip.link.set('dev', 'swarm', 'up')
        
        # start docker daemon on remote host, headless via 'nohup', output to logfile
        ssh("sudo sh -c 'nohup docker daemon -H tcp://0.0.0.0:{dp} --exec-root=/var/run/docker.swarm --graph=/var/lib/docker.swarm --pidfile=/var/run/docker.swarm.pid --bridge={b} --cluster-advertise=ens1:{dp} --cluster-store=consul://{c}:{cp} >{log} 2>&1 &'".format(dp=DOCKER_PORT, c=CONSUL, cp=CONSUL_PORT, log="/var/log/docker.swarm", b=BRIDGE))
    
    time.sleep(1)
    # start Swarm manager
    nodelist = ','.join(["{}:{}".format(h, DOCKER_PORT) for h in hosts])
    docker(MASTER).run("--name=swarm", "-d", "--publish={}:2375".format(SWARM_PORT), "swarm:1.1.0", "--debug", "manage", "nodes://{}".format(nodelist))
Example #41
def setenv():
    if 'Docker version 1.1.2' not in sh.docker(version=True):
        raise RuntimeError('Tested with docker 1.1.2 only. If you know this will work with other versions, '
                           'Update this code to be more flexible')
    if 'Vagrant 1.6.3' not in sh.vagrant(version=True):
        raise RuntimeError('Tested with vagrant 1.6.3 only. If you know this will work with other versions, '
                           'Update this code to be more flexible')
    for env_var, default_value in env_variables.items():
        if default_value and not os.environ.get(env_var):
            os.environ[env_var] = default_value
    cloudify_enviroment_varaible_names = ':'.join(env_variables.keys())
    os.environ['CLOUDIFY_ENVIRONMENT_VARIABLE_NAMES'] = cloudify_enviroment_varaible_names
    if not os.environ.get('TEST_SUITES_PATH'):
        suite_json_path = build_suites_json('suites/suites.json')
        os.environ['TEST_SUITES_PATH'] = suite_json_path
Example #42
def main():
    tmpname = settings.kolabimagename("tmpbase")

    SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))

    print("Building tmpcontainer...")
    docker("build", "-t", tmpname, SCRIPT_DIR+"/kolab/")
    print("Starting tmpcontainer...")
    print(SCRIPT_DIR+"/fixRoundcubeT243.sh:/usr/share/roundcubemail/fixRoundcubeT243.sh")
    container = docker.run("-d", "-h", settings.HOSTNAME, "-v", "/sys/fs/cgroup:/sys/fs/cgroup:ro", "-v", SCRIPT_DIR+"/kolab/fixRoundcubeT243.sh:/usr/share/roundcubemail/fixRoundcubeT243.sh", tmpname).rstrip()

    print("Setting up kolab")
    docker("exec", container,  "setup-kolab", "--default", "--timezone="+settings.TIMEZONE, "--directory-manager-pwd="+settings.LDAPPW, "--mysqlserver=new", _out=process_output)

    print("Fixing roundcube")
    docker("exec", container, "bash", "/usr/share/roundcubemail/fixRoundcubeT243.sh", _out=process_output)
    docker("exec", container, "systemctl", "restart", "httpd", _out=process_output)

    print("Comitting results...")
    docker.commit(container, settings.kolabimagename("base"))
    docker.stop(container)
    docker.rm(container)
Example #43
def docker_execute(command_list, logger):
    """ Run and tail a docker command. """
    import sh
    running_command = sh.docker(command_list, _iter=True)
    for line in running_command:
        logger.info(line.strip())
Example #44
def build(cp, package_info, commit, env_vars, dev_mode, use_public):

    # Set the build timestamp to now
    commit.dt_build = int(time())

    datadir = os.path.realpath(cp.get("DEFAULT", "datadir"))
    scriptsdir = os.path.realpath(cp.get("DEFAULT", "scriptsdir"))
    target = cp.get("DEFAULT", "target")
    yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
    yumrepodir_abs = os.path.join(datadir, yumrepodir)

    commit_hash = commit.commit_hash
    project_name = commit.project_name
    repo_dir = commit.repo_dir

    # If yum repo already exists remove it and assume we're starting fresh
    if os.path.exists(yumrepodir_abs):
        shutil.rmtree(yumrepodir_abs)
    os.makedirs(yumrepodir_abs)

    sh.git("--git-dir", "%s/.git" % repo_dir,
           "--work-tree=%s" % repo_dir, "reset", "--hard", commit_hash)

    docker_run_cmd = []
    # expand the env name=value pairs into docker arguments
    if env_vars:
        for env_var in env_vars:
            docker_run_cmd.append('--env')
            docker_run_cmd.append(env_var)
    if (dev_mode or use_public):
            docker_run_cmd.append('--env')
            docker_run_cmd.append("DELOREAN_DEV=1")

    docker_run_cmd.extend(["-t", "--volume=%s:/data" % datadir,
                           "--volume=%s:/scripts" % scriptsdir,
                           "--name", "builder-%s" % target,
                           "delorean/%s" % target,
                           "/scripts/build_rpm_wrapper.sh", project_name,
                           "/data/%s" % yumrepodir, str(os.getuid()),
                           str(os.getgid())])
    try:
        sh.docker("run", docker_run_cmd)
    except Exception as e:
        logger.error('Docker cmd failed. See logs at: %s/%s/' % (datadir,
                                                                 yumrepodir))
        raise e
    finally:
        # Kill builder-"target" if running and remove if present
        try:
            sh.docker("kill", "builder-%s" % target)
            sh.docker("wait", "builder-%s" % target)
        except Exception:
            pass
        try:
            sh.docker("rm", "builder-%s" % target)
        except Exception:
            pass

    built_rpms = []
    for rpm in os.listdir(yumrepodir_abs):
        if rpm.endswith(".rpm"):
            built_rpms.append(os.path.join(yumrepodir, rpm))
    if not built_rpms:
        raise Exception("No rpms built for %s" % project_name)

    notes = "OK"
    if not os.path.isfile(os.path.join(yumrepodir_abs, "installed")):
        logger.error('Build failed. See logs at: %s/%s/' % (datadir,
                                                            yumrepodir))
        raise Exception("Error installing %s" % project_name)

    packages = [package["name"] for package in package_info["packages"]]
    for otherproject in packages:
        if otherproject == project_name:
            continue
        last_success = session.query(Commit).\
            filter(Commit.project_name == otherproject).\
            filter(Commit.status == "SUCCESS").\
            order_by(desc(Commit.id)).first()
        if not last_success:
            continue
        rpms = last_success.rpms.split(",")
        for rpm in rpms:
            rpm_link_src = os.path.join(yumrepodir_abs, os.path.split(rpm)[1])
            os.symlink(os.path.relpath(os.path.join(datadir, rpm),
                       yumrepodir_abs), rpm_link_src)

    sh.createrepo(yumrepodir_abs)

    fp = open(os.path.join(yumrepodir_abs,
                           "%s.repo" % cp.get("DEFAULT", "reponame")), "w")
    fp.write("[%s]\nname=%s-%s-%s\nbaseurl=%s/%s\nenabled=1\n"
             "gpgcheck=0\npriority=1" % (cp.get("DEFAULT", "reponame"),
                                         cp.get("DEFAULT", "reponame"),
                                         project_name, commit_hash,
                                         cp.get("DEFAULT", "baseurl"),
                                         commit.getshardedcommitdir()))
    fp.close()

    current_repo_dir = os.path.join(datadir, "repos", "current")
    os.symlink(os.path.relpath(yumrepodir_abs, os.path.join(datadir, "repos")),
               current_repo_dir + "_")
    os.rename(current_repo_dir + "_", current_repo_dir)
    return built_rpms, notes
Example #45
def teardown_function(function):
    try:
        sh.docker('rmi', '-f', 'footest', 'bartest')
    except:
        pass
Example #46
def main(dataset):
    tmpname="kolab/kolabtestcontainer:tmppopulated"
    imagename="kolab/kolabtestcontainer:populated-"+dataset
    basedir =  "{c.SCRIPT_DIR}/kolabpopulated".format(c=config)

    print("Building tmpcontainer...")
    docker.build(settings.dockerCacheString(), "-t", tmpname, "{basedir}/.".format(basedir=basedir))

    print("Starting tmpcontainer...")
    container = docker.run("-d", "-h", settings.HOSTNAME,
            "-v", "/sys/fs/cgroup:/sys/fs/cgroup:ro",
            "-v", "{basedir}/{dataset}/:/data/".format(basedir=basedir, dataset=dataset),
            '-v',  "{c.SCRIPT_DIR}/kolab/populate/:/populate".format(c=config),
            tmpname).rstrip()
    try:
        # Wait for imap to become available on imaps://localhost:993
        time.sleep(5)

        print "Populate OU..."
        docker("exec", container, "python2", "/populate/populate_ou.py", _out=sys.stdout)

        print "Populate users..."
        docker("exec", container, "python2", "/populate/populate_users.py", _out=sys.stdout)

        print "Populate resources..."
        docker("exec", container, "python2", "/populate/populate_resources.py", _out=sys.stdout)

        print("Running populate.sh...")
        docker("exec", container,  "/data/populate.sh", _out=sys.stdout)

        # Give kolabd some time to create all mailboxes
        time.sleep(5)

        docker("exec", container, "patch", "-R", "/etc/kolab/kolab.conf", "/data/kolab.conf.diff", _out=sys.stdout)
        docker("exec", container, "patch", "-R", "/etc/roundcubemail/calendar.inc.php", "/data/calendar.inc.php.diff", _out=sys.stdout)
        docker("exec", container, "patch", "-R", "/etc/roundcubemail/config.inc.php", "/data/config.inc.php.diff", _out=sys.stdout)
        docker("exec", container, "patch", "-R", "/etc/roundcubemail/kolab_addressbook.inc.php", "/data/kolab_addressbook.inc.php.diff", _out=sys.stdout)
        docker("exec", container, "patch", "-R", "/etc/roundcubemail/kolab_auth.inc.php", "/data/kolab_auth.inc.php.diff", _out=sys.stdout)
        docker("exec", container, "patch", "-R", "/etc/roundcubemail/password.inc.php", "/data/password.inc.php.diff", _out=sys.stdout)

        print("Comitting results to: {}".format(imagename))
        docker.commit(container, imagename)
    except:
        print("Unexpected error:", sys.exc_info()[0])
        traceback.print_exc(file=sys.stdout)
        print("Failed to setup container")

    docker.stop(container)
    docker.rm(container)
Example #47
try:
	import sh
except:
	print("Could not find the sh module:")
	print("  sudo pip install sh")
	sys.exit(1)

from sh import ln
from sh import rm
from sh import docker


DOCKERFILES = [
	('bleScannerHCI', 'lab11/wearabouts-ble-scanner-js'),
	('bleScanner', 'lab11/wearabouts-ble-scanner-py')
]


# Get rid of the Dockerfile in the root
print('Removing existing Dockerfile if it exists')
rm('Dockerfile', '-f')

# Build each docker image
for dockerfile in DOCKERFILES:
	print('Building {}'.format(dockerfile[1]))
	ln('-s', dockerfile[0]+'/Dockerfile', 'Dockerfile')
	for chunk in docker('build', '-t', dockerfile[1], '.'):
		print(chunk, end="")

	rm('Dockerfile')
Example #48
 def _assert_no_docker_image_running(self, name):
     assert_that(name, is_not(is_in(sh.docker('ps'))))
Example #49
 def _listen(self, stdin, **kwargs):
     """
     Feed a raw command to a container via stdin.
     """
     return docker("exec", "--interactive", self.name,
                   "bash", s=True, _in=stdin, **kwargs)
Example #50
def test_single_image():
    sh.docker(sh.cat('empty.tar'), 'import', '-', 'footest')
    f = StringIO()
    Docktree(restrict='footest', file=f).draw_tree()
    assert re.match(u'└─ sha256:[a-f0-9]{5} footest:latest\n', f.getvalue())
Example #51
#!/usr/bin/env python2
import sh
import subprocess

def findContainer(name):
    container=""
    try:
        container = sh.awk("{print $1}", _in=sh.head("-n 1", _in=sh.grep(name, _in=sh.docker("ps", "-a"))))
    except:
        print "container not available"
    return container.rstrip()

containerName="kolab/kolabtestcontainer:latest"
container=findContainer(containerName)

if not container:
    print "starting container"
    subprocess.call("./run.sh", shell=True, cwd="kolab")
    container=findContainer(containerName)

print container
subprocess.call("./run.sh", shell=True, cwd="kontact")
sh.docker("stop", container)
sh.docker("rm", container)
Example #52
def build(cp, package_info, dt, project, repo_dir, commit):
    datadir = os.path.realpath(cp.get("DEFAULT", "datadir"))
    # TODO : only working by convention need to improve
    scriptsdir = datadir.replace("data", "scripts")
    yumrepodir = os.path.join("repos", commit[:2], commit[2:4], commit)
    yumrepodir_abs = os.path.join(datadir, yumrepodir)

    # If yum repo already exists remove it and assume we're starting fresh
    if os.path.exists(yumrepodir_abs):
        shutil.rmtree(yumrepodir_abs)
    os.makedirs(yumrepodir_abs)

    # We need to make sure if any patches exist in the master-patches branch
    # they they can still be applied to upstream master, if they can we stop
    testpatches(project, commit, datadir)

    sh.git("--git-dir", "%s/.git" % repo_dir,
           "--work-tree=%s" % repo_dir, "reset", "--hard", commit)
    try:
        sh.docker("kill", "builder")
    except:
        pass

    # looks like we need to give the container time to die
    time.sleep(20)
    try:
        sh.docker("rm", "builder")
    except:
        pass

    try:
        sh.docker("run", "-t", "--volume=%s:/data" % datadir,
                  "--volume=%s:/scripts" % scriptsdir,
                  "--name", "builder", "delorean/fedora",
                  "/scripts/build_rpm_wrapper.sh", project,
                  "/data/%s" % yumrepodir)
    except:
        raise Exception("Error while building packages")

    built_rpms = []
    for rpm in os.listdir(yumrepodir_abs):
        if rpm.endswith(".rpm"):
            built_rpms.append(os.path.join(yumrepodir, rpm))
    if not built_rpms:
        raise Exception("No rpms built for %s" % project)

    notes = "OK"
    if not os.path.isfile(os.path.join(yumrepodir_abs, "installed")):
        notes = "Error installing"

    packages = [package["name"] for package in package_info["packages"]]
    for otherproject in packages:
        if otherproject == project:
            continue
        last_success = session.query(Commit).\
            filter(Commit.project_name == otherproject).\
            filter(Commit.status == "SUCCESS").\
            order_by(desc(Commit.dt_commit)).first()
        if not last_success:
            continue
        rpms = last_success.rpms.split(",")
        for rpm in rpms:
            rpm_link_src = os.path.join(yumrepodir_abs, os.path.split(rpm)[1])
            os.symlink(os.path.relpath(os.path.join(datadir, rpm),
                       yumrepodir_abs), rpm_link_src)

    sh.createrepo(yumrepodir_abs)

    fp = open(os.path.join(yumrepodir_abs, "delorean.repo"), "w")
    fp.write("[delorean]\nname=delorean-%s-%s\nbaseurl=%s/%s\nenabled=1\n"
             "gpgcheck=0" % (project, commit, cp.get("DEFAULT", "baseurl"),
                             yumrepodir))
    fp.close()

    current_repo_dir = os.path.join(datadir, "repos", "current")
    os.symlink(os.path.relpath(yumrepodir_abs, os.path.join(datadir, "repos")),
               current_repo_dir+"_")
    os.rename(current_repo_dir+"_", current_repo_dir)
    return built_rpms, notes
Example #53
 def run_docker_sh(docker_cmd, docker_args=None, **kwargs):
     _docker_args = docker_args if docker_args is not None else []
     _docker_args.insert(0, docker_cmd)
     log.debug("Running 'docker {}' with args {}".format(docker_cmd, _docker_args[1:]))
     return sh.docker(_docker_args, **kwargs)
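A possible invocation of the helper above; extra keyword arguments are forwarded straight to sh.docker (the _iter=True streaming here is our choice, not from the source):

# List all containers, streaming output line by line.
for line in run_docker_sh("ps", ["-a"], _iter=True):
    print(line, end="")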
Example #54
def docker_build(job_id, reponame, tag):
    print 'Handle', job_id, reponame, tag

    workspace = job_workspace(job_id)
    if not os.path.exists(workspace):
        os.makedirs(workspace)

    reply = rpost('/task/update', data=dict(id=job_id, status='building'))
    print 'reply:', reply
    bufio = StringIO.StringIO()
    lock = threading.Lock()
    running = True
    def _loop_report_stdout():
        pos = 0
        while True:
            lock.acquire()
            if not running: 
                lock.release()
                break
            if pos == bufio.tell():
                time.sleep(1)
                lock.release()
                continue
            bufio.read()
            reply = rpost('/task/update', data=dict(
                id=job_id, status='building', output=bufio.buf[pos:], append=1))
            print reply
            sys.stdout.write(bufio.buf[pos:])
            pos = bufio.tell()
            lock.release()
        print 'loop ended'
    t = threading.Thread(target=_loop_report_stdout)
    t.setDaemon(True)
    t.start()

    ret = sh.docker('run', '--rm',
            '-v', workspace+':/output',
            '-e', 'TIMEOUT=10m',
            '-e', 'HTTP_PROXY=%s'%gcfg.slave.http_proxy,
            DOCKER_IMAGE,
            '--repo', reponame,
            '--tag', tag, 
            _err_to_out=True, _out=bufio, _tee=True, _ok_code=range(255))

    jsonpath = pathjoin(workspace, 'out.json')
    out = json.load(open(pathjoin(workspace, 'out.json'))) if \
            os.path.exists(jsonpath) else {}
    out['success'] = (ret.exit_code == 0)
    out['output'] = str(ret)
    out['id'] = job_id
    out['safe_token'] = safe_token
    if not out['success']:
        rpost('/task/update', data=dict(id=job_id, status='error', 
            output = str(ret)))
        return

    rpost('/task/update', data=dict(
        id=job_id, 
        status='uploading'))

    #print 'Uploading files'
    print >>bufio, 'Uploading files'

    for osarch, info in out['files'].items():
        outname = info.get('outname')
        safetag = ''.join(re.findall('[-_.\w\d]+', tag.replace(':', '-v-')))
        key = pathjoin(reponame, safetag, outname)
        print >>bufio, 'File:', outname, key
        info['outlink'] = upload_file(key, pathjoin(workspace, outname))

        logname = info.get('logname')
        key = pathjoin(str(job_id), osarch, logname)
        #print 'Log: ', logname
        info['loglink'] = upload_file(key, pathjoin(workspace, logname))

    print >>bufio, '==DONE=='
    running = False
    t.join()

    json.dump(out, open('sample.json', 'w'))
    reply = rpost('/task/commit', data=json.dumps(out))
    print 'commit reply:', reply
Example #55
def validate_model(abi,
                   serialno,
                   model_file_path,
                   weight_file_path,
                   platform,
                   device_type,
                   input_nodes,
                   output_nodes,
                   input_shapes,
                   output_shapes,
                   model_output_dir,
                   phone_data_dir,
                   caffe_env,
                   input_file_name="model_input",
                   output_file_name="model_out"):
    print("* Validate with %s" % platform)
    if abi != "host":
        for output_name in output_nodes:
            formatted_name = common.formatted_file_name(
                output_file_name, output_name)
            if os.path.exists("%s/%s" % (model_output_dir,
                                         formatted_name)):
                sh.rm("-rf", "%s/%s" % (model_output_dir, formatted_name))
            adb_pull("%s/%s" % (phone_data_dir, formatted_name),
                     model_output_dir, serialno)

    if platform == "tensorflow":
        validate(platform, model_file_path, "",
                 "%s/%s" % (model_output_dir, input_file_name),
                 "%s/%s" % (model_output_dir, output_file_name), device_type,
                 ":".join(input_shapes), ":".join(output_shapes),
                 ",".join(input_nodes), ",".join(output_nodes))
    elif platform == "caffe":
        image_name = "mace-caffe:latest"
        container_name = "mace_caffe_validator"

        if caffe_env == common.CaffeEnvType.LOCAL:
            import imp
            try:
                imp.find_module('caffe')
            except ImportError:
                logger.error('There is no caffe python module.')
            validate(platform, model_file_path, weight_file_path,
                     "%s/%s" % (model_output_dir, input_file_name),
                     "%s/%s" % (model_output_dir, output_file_name),
                     device_type,
                     ":".join(input_shapes), ":".join(output_shapes),
                     ",".join(input_nodes), ",".join(output_nodes))
        elif caffe_env == common.CaffeEnvType.DOCKER:
            docker_image_id = sh.docker("images", "-q", image_name)
            if not docker_image_id:
                print("Build caffe docker")
                sh.docker("build", "-t", image_name,
                          "third_party/caffe")

            container_id = sh.docker("ps", "-qa", "-f",
                                     "name=%s" % container_name)
            if container_id and not sh.docker("ps", "-qa", "--filter",
                                              "status=running", "-f",
                                              "name=%s" % container_name):
                sh.docker("rm", "-f", container_name)
                container_id = ""
            if not container_id:
                print("Run caffe container")
                sh.docker(
                        "run",
                        "-d",
                        "-it",
                        "--name",
                        container_name,
                        image_name,
                        "/bin/bash")

            for input_name in input_nodes:
                formatted_input_name = common.formatted_file_name(
                        input_file_name, input_name)
                sh.docker(
                        "cp",
                        "%s/%s" % (model_output_dir, formatted_input_name),
                        "%s:/mace" % container_name)

            for output_name in output_nodes:
                formatted_output_name = common.formatted_file_name(
                        output_file_name, output_name)
                sh.docker(
                        "cp",
                        "%s/%s" % (model_output_dir, formatted_output_name),
                        "%s:/mace" % container_name)
            model_file_name = os.path.basename(model_file_path)
            weight_file_name = os.path.basename(weight_file_path)
            sh.docker("cp", "tools/common.py", "%s:/mace" % container_name)
            sh.docker("cp", "tools/validate.py", "%s:/mace" % container_name)
            sh.docker("cp", model_file_path, "%s:/mace" % container_name)
            sh.docker("cp", weight_file_path, "%s:/mace" % container_name)

            sh.docker(
                "exec",
                container_name,
                "python",
                "-u",
                "/mace/validate.py",
                "--platform=caffe",
                "--model_file=/mace/%s" % model_file_name,
                "--weight_file=/mace/%s" % weight_file_name,
                "--input_file=/mace/%s" % input_file_name,
                "--mace_out_file=/mace/%s" % output_file_name,
                "--device_type=%s" % device_type,
                "--input_node=%s" % ",".join(input_nodes),
                "--output_node=%s" % ",".join(output_nodes),
                "--input_shape=%s" % ":".join(input_shapes),
                "--output_shape=%s" % ":".join(output_shapes),
                _fg=True)

    print("Validation done!\n")