Example #1
    def callback(self, ch, method, properties, body):
        data = json.loads(body)
        submit_data = SubmitData(data["id"], data["problem_tag"],
                                 data["source_code"], data["author"],
                                 data["language"])

        if self.check_language_exists(submit_data.language):
            return JudgeResult("WA", "Cannot run with specified language")

        # TODO: logic that runs the submission in Docker
        docker_client = DockerClient(submit_data)
        res = docker_client.fetch_problem_testcases(self.db_session)
        print(res)
Example #2
    def create_run_manager(w):
        if args.batch_queue is None:
            # We defer importing the run managers so their dependencies are lazily loaded
            from docker_run import DockerRunManager
            from docker_client import DockerClient
            from docker_image_manager import DockerImageManager

            logging.info("Using local docker client for run submission.")

            docker = DockerClient()
            image_manager = DockerImageManager(docker, args.work_dir, max_images_bytes)
            cpuset = parse_cpuset_args(args.cpuset)
            gpuset = parse_gpuset_args(docker, args.gpuset)
            return DockerRunManager(docker, bundle_service, image_manager, w, args.network_prefix, cpuset, gpuset)
        else:
            try:
                import boto3
            except ImportError:
                logging.exception("Missing dependencies, please install boto3 to enable AWS support.")
                import sys
                sys.exit(1)

            from aws_batch import AwsBatchRunManager

            logging.info("Using AWS Batch queue %s for run submission.", args.batch_queue)

            batch_client = boto3.client('batch')
            return AwsBatchRunManager(batch_client, args.batch_queue, bundle_service, w)
Example #3
def scan(images=True):
    """ scanning method that will scan all images or containers """

    client = DockerClient()
    emu = Emulator()

    objs = client.images() if images is True else client.containers()

    # If there are no images/containers on the machine, objs will be ['']
    if objs == ['']:
        return

    # does actual work here!
    for im in objs:
        try:
            emu.create_dirs()
            mount_obj(emu.tmp_image_dir, im, client.info()['Storage Driver'])

            if emu.is_applicable():

                print "scanning " + im[:12]
                emu.intial_setup()

                emu.chroot_and_run()

                emu.unmount()
            else:
                print im[:12] + " is not RHEL based"

            unmount_obj(emu.tmp_image_dir, client.info()['Storage Driver'])
            emu.remove_dirs()

        except MountError as dme:
            raise ValueError(str(dme))

    emu.gather_data(images)
Example #4
def main():
    """
    Generates a Docker Image of ChefSDK based on a local dockerfile.
    Installs a local recipe in the image.
    :return:
    """
    # Parse the command line arguments
    parser = argparse.ArgumentParser(description="Generates Docker images based on Chef cookbooks")
    parser.add_argument("chef_name", help="The chef cookbook to deploy")
    parser.add_argument(
        "-f",
        "--fast",
        dest="fast",
        action="store_true",
        help="Fast save (RAM intensive), slow commandline save by default",
    )
    parser.add_argument("-H", dest="host", help="docker url (defaults to local socket")
    parser.add_argument("-v", "--verbose", dest="debug", action="store_true", help="Show verbose messages")
    args = parser.parse_args()

    # logging management
    lev = logging.ERROR
    if args.debug:
        lev = logging.DEBUG
    logging.basicConfig(level=lev)
    # generate the docker image
    print "Connecting to Docker Client...",
    dc = DockerClient(args.host)
    print "OK"
    print "Generating Image...",
    sta = dc.generate_image("ChefImage.docker", args.chef_name)
    print "OK" if sta else "FAILED"
    print "Saving Image file docker-%s.tar to disk ..." % args.chef_name,
    if args.fast:
        dc.save_image()
    else:
        dc.save_image_cmd()
    print "OK"
Example #6
            print >> sys.stderr, """
Permissions on password file are too lax.
Only the user should be allowed to access the file.
On Linux, run:
chmod 600 %s""" % args.password_file
            exit(1)
        with open(args.password_file) as f:
            username = f.readline().strip()
            password = f.readline().strip()
    else:
        username = raw_input('Username: ')
        password = getpass.getpass()

    # Set up logging.
    if args.verbose:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            level=logging.INFO)

    max_work_dir_size_bytes = parse_size(args.max_work_dir_size)
    worker = Worker(args.id, args.tag, args.work_dir, max_work_dir_size_bytes,
                    args.shared_file_system, args.slots,
                    BundleServiceClient(args.server, username, password),
                    DockerClient())

    # Register a signal handler to ensure safe shutdown.
    for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP]:
        signal.signal(sig, lambda signup, frame: worker.signal())

    print 'Worker started.'
    worker.run()
Example #7
def main():
    parser = argparse.ArgumentParser(description='CodaLab worker.')
    parser.add_argument('--tag',
                        help='Tag that allows for scheduling runs on specific '
                        'workers.')
    parser.add_argument(
        '--server',
        default='https://worksheets.codalab.org',
        help='URL of the CodaLab server, in the format '
        '<http|https>://<hostname>[:<port>] (e.g., https://worksheets.codalab.org)'
    )
    parser.add_argument('--work-dir',
                        default='codalab-worker-scratch',
                        help='Directory where to store temporary bundle data, '
                        'including dependencies and the data from run '
                        'bundles.')
    parser.add_argument('--max-work-dir-size',
                        type=str,
                        metavar='SIZE',
                        default='10g',
                        help='Maximum size of the temporary bundle data '
                        '(e.g., 3, 3k, 3m, 3g, 3t).')
    parser.add_argument(
        '--max-image-cache-size',
        type=str,
        metavar='SIZE',
        help='Limit the disk space used to cache Docker images '
        'for worker jobs to the specified amount (e.g. '
        '3, 3k, 3m, 3g, 3t). If the limit is exceeded, '
        'the least recently used images are removed first. '
        'Worker will not remove any images if this option '
        'is not specified.')
    parser.add_argument('--slots',
                        type=int,
                        default=1,
                        help='Number of slots to use for running bundles. '
                        'A single bundle takes up a single slot.')
    parser.add_argument('--password-file',
                        help='Path to the file containing the username and '
                        'password for logging into the bundle service, '
                        'each on a separate line. If not specified, the '
                        'password is read from standard input.')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='Whether to output verbose log messages.')
    parser.add_argument('--id',
                        default='%s(%d)' % (socket.gethostname(), os.getpid()),
                        help='Internal use: ID to use for the worker.')
    parser.add_argument(
        '--shared-file-system',
        action='store_true',
        help='Internal use: Whether the file system containing '
        'bundle data is shared between the bundle service '
        'and the worker.')
    args = parser.parse_args()

    # Get the username and password.
    logger.info('Connecting to %s' % args.server)
    if args.password_file:
        if os.stat(args.password_file).st_mode & (stat.S_IRWXG | stat.S_IRWXO):
            print >> sys.stderr, """
Permissions on password file are too lax.
Only the user should be allowed to access the file.
On Linux, run:
chmod 600 %s""" % args.password_file
            exit(1)
        with open(args.password_file) as f:
            username = f.readline().strip()
            password = f.readline().strip()
    else:
        username = os.environ.get('CODALAB_USERNAME')
        if username is None:
            username = raw_input('Username: ')
        password = os.environ.get('CODALAB_PASSWORD')
        if password is None:
            password = getpass.getpass()

    # Set up logging.
    if args.verbose:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            level=logging.INFO)

    max_work_dir_size_bytes = parse_size(args.max_work_dir_size)
    if args.max_image_cache_size is None:
        max_images_bytes = None
    else:
        max_images_bytes = parse_size(args.max_image_cache_size)
    worker = Worker(args.id, args.tag, args.work_dir, max_work_dir_size_bytes,
                    max_images_bytes, args.shared_file_system, args.slots,
                    BundleServiceClient(args.server, username, password),
                    DockerClient())

    # Register a signal handler to ensure safe shutdown.
    for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP]:
        signal.signal(sig, lambda signup, frame: worker.signal())

    logger.info('Worker started.')
    worker.run()
Example #8
 def __init__(self, mountpoint, live=False, mnt_mkdir=False):
     Mount.__init__(self, mountpoint, live)
     self.client = docker.Client()
     self.docker_client = DockerClient()
     self.mnt_mkdir = mnt_mkdir
Example #9
class DockerMount(Mount):

    """
    A class which can be used to mount and unmount docker containers and
    images on a filesystem location.

    mnt_mkdir = Create temporary directories based on the cid at mountpoint
                for mounting containers
    """

    def __init__(self, mountpoint, live=False, mnt_mkdir=False):
        Mount.__init__(self, mountpoint, live)
        self.client = docker.Client()
        self.docker_client = DockerClient()
        self.mnt_mkdir = mnt_mkdir

    def _create_temp_container(self, iid):
        """
        Create a temporary container from a given iid.

        Temporary containers are marked with a sentinel environment
        variable so that they can be cleaned on unmount.
        """
        try:
            return self.docker_client.create_container(iid)
        except docker.errors.APIError as ex:
            raise MountError('Error creating temporary container:\n' + str(ex))

    def _clone(self, cid):
        """
        Create a temporary image snapshot from a given cid.

        Temporary image snapshots are marked with a sentinel label
        so that they can be cleaned on unmount.
        """
        try:
            iid = self.docker_client.commit(cid)
        except docker.errors.APIError as ex:
            raise MountError(str(ex))
        return self._create_temp_container(iid)

    def _identifier_as_cid(self, identifier):
        """
        Returns a container uuid for identifier.

        If identifier is an image UUID or image tag, create a temporary
        container and return its uuid.
        """

        if self.docker_client.is_a_container(identifier):
            if self.live:
                return identifier
            else:
                return self._clone(identifier)
        elif self.docker_client.is_an_image(identifier):
            return self._create_temp_container(identifier)
        else:
            raise MountError('{} did not match any image or container.'
                             ''.format(identifier))

    @staticmethod
    def _no_gd_api_dm(cid):
        # TODO: Deprecated
        desc_file = os.path.join('/var/lib/docker/devicemapper/metadata', cid)
        desc = json.loads(open(desc_file).read())
        return desc['device_id'], desc['size']

    @staticmethod
    def _no_gd_api_overlay(cid):
        # TODO: Deprecated
        prefix = os.path.join('/var/lib/docker/overlay/', cid)
        ld_metafile = open(os.path.join(prefix, 'lower-id'))
        ld_loc = os.path.join('/var/lib/docker/overlay/', ld_metafile.read())
        return (os.path.join(ld_loc, 'root'), os.path.join(prefix, 'upper'),
                os.path.join(prefix, 'work'))

    def mount(self, identifier, options=[]):
        """
        Mounts a container or image referred to by identifier to
        the host filesystem.
        """
        driver = self.docker_client.info()['Storage Driver']
        driver_mount_fn = getattr(self, "_mount_" + driver,
                                  self._unsupported_backend)
        driver_mount_fn(identifier, options)

        # Return mount path so it can be later unmounted by path
        return self.mountpoint

    def _unsupported_backend(self, identifier='', options=[]):
        # raise MountError('Atomic mount is not supported on the {} docker '
        #                  'storage backend.'
        #                  ''.format(self.client.info()['Driver']))
        driver = self.docker_client.info()['Storage Driver']
        raise MountError('Atomic mount is not supported on the {} docker '
                         'storage backend.'
                         ''.format(driver))

    def _mount_devicemapper(self, identifier, options):
        """
        Devicemapper mount backend.
        """
        if os.geteuid() != 0:
            raise MountError('Insufficient privileges to mount device.')

        if self.live and options:
            raise MountError('Cannot set mount options for live container '
                             'mount.')

        # info = self.client.info()
        info = self.docker_client.info()

        cid = self._identifier_as_cid(identifier)

        if self.mnt_mkdir:
            # If the given mount_path is just a parent dir for where
            # to mount things by cid, then the new mountpoint is the
            # mount_path plus the first 20 chars of the cid
            self.mountpoint = os.path.join(self.mountpoint, cid[:20])
            try:
                os.mkdir(self.mountpoint)
            except Exception as e:
                raise MountError(e)

        # cinfo = self.client.inspect_container(cid)
        cinfo = self.docker_client.inspect(cid)

        if self.live and not cinfo['State']['Running']:
            self._cleanup_container(cinfo)
            raise MountError('Cannot live mount non-running container.')

        options = [] if self.live else ['ro', 'nosuid', 'nodev']

        dm_dev_name, dm_dev_id, dm_dev_size = '', '', ''
        # dm_pool = info['DriverStatus'][0][1]
        dm_pool = info['Pool Name']
        try:
            #FIXME, GraphDriver isn't in inspect container output
            dm_dev_name = cinfo['GraphDriver']['Data']['DeviceName']
            dm_dev_id = cinfo['GraphDriver']['Data']['DeviceId']
            dm_dev_size = cinfo['GraphDriver']['Data']['DeviceSize']
        except:
            # TODO: deprecated when GraphDriver patch makes it upstream
            dm_dev_id, dm_dev_size = DockerMount._no_gd_api_dm(cid)
            dm_dev_name = dm_pool.replace('pool', cid)

        dm_dev_path = os.path.join('/dev/mapper', dm_dev_name)
        # If the device isn't already there, activate it.
        if not os.path.exists(dm_dev_path):
            if self.live:
                raise MountError('Error: Attempted to live-mount unactivated '
                                 'device.')

            Mount._activate_thin_device(dm_dev_name, dm_dev_id, dm_dev_size,
                                        dm_pool)

        # XFS should get nosuid
        fstype = Mount._get_fs(dm_dev_path)
        if fstype.upper() == 'XFS' and 'suid' not in options:
            if 'nosuid' not in options:
                options.append('nosuid')
        try:
            Mount.mount_path(dm_dev_path, self.mountpoint,
                             optstring=(','.join(options)))
        except MountError as de:
            if not self.live:
                Mount._remove_thin_device(dm_dev_name)
            self._cleanup_container(cinfo)
            raise de

    def _mount_overlay(self, identifier, options):
        """
        OverlayFS mount backend.
        """
        if os.geteuid() != 0:
            raise MountError('Insufficient privileges to mount device.')

        if self.live:
            raise MountError('The OverlayFS backend does not support live '
                             'mounts.')
        elif 'rw' in options:
            raise MountError('The OverlayFS backend does not support '
                             'writeable mounts.')

        cid = self._identifier_as_cid(identifier)
        # cinfo = self.client.inspect_container(cid)
        cinfo = self.docker_client.inspect(cid)

        ld, ud, wd = '', '', ''
        try:
            #FIXME, GraphDriver isn't in inspect container output
            ld = cinfo['GraphDriver']['Data']['lowerDir']
            ud = cinfo['GraphDriver']['Data']['upperDir']
            wd = cinfo['GraphDriver']['Data']['workDir']
        except:
            ld, ud, wd = DockerMount._no_gd_api_overlay(cid)

        options += ['ro', 'lowerdir=' + ld, 'upperdir=' + ud, 'workdir=' + wd]
        optstring = ','.join(options)
        cmd = ['mount', '-t', 'overlay', '-o', optstring, 'overlay',
               self.mountpoint]
        status = util.subp(cmd)

        if status.return_code != 0:
            self._cleanup_container(cinfo)
            raise MountError('Failed to mount OverlayFS device.\n' +
                             status.stderr.decode(sys.getdefaultencoding()))

    def _cleanup_container(self, cinfo):
        """
        Remove a container and clean up its image if necessary.
        """
        # I'm not a fan of doing this again here.
        env = cinfo['Config']['Env']
        if (env and '_RHAI_TEMP_CONTAINER' not in env) or not env:
            return

        iid = cinfo['Image']

        # self.client.remove_container(cinfo['Id'])
        self.docker_client.remove_container(cinfo['Id'])

        info = self.docker_client.inspect(iid)


        ##FIXME info['Config'] will be a null value and cause an exception if not RHEL based....
        try:
            if info['Config']:
                if '_RHAI_TEMP_CONTAINER=True' in info['Config']['Env']:
                    #FIXME THIS IS BROKEN
                    self.docker_client.remove_image(iid)
        except:
            pass

        # If we are creating temporary dirs for mount points
        # based on the cid, then we should rmdir them while
        # cleaning up.
        if self.mnt_mkdir:
            try:
                os.rmdir(self.mountpoint)
            except Exception as e:
                raise MountError(e)

    def unmount(self):
        """
        Unmounts and cleans-up after a previous mount().
        """
        # driver = self.client.info()['Driver']
        driver = self.docker_client.info()['Storage Driver']
        driver_unmount_fn = getattr(self, "_unmount_" + driver,
                                    self._unsupported_backend)
        driver_unmount_fn()

    def _unmount_devicemapper(self):
        """
        Devicemapper unmount backend.
        """
        # pool = self.client.info()['DriverStatus'][0][1]
        pool = self.docker_client.info()['Pool Name']
        dev = Mount.get_dev_at_mountpoint(self.mountpoint)

        dev_name = dev.replace('/dev/mapper/', '')
        if not dev_name.startswith(pool.rsplit('-', 1)[0]):
            raise MountError('Device mounted at {} is not a docker container.'
                             ''.format(self.mountpoint))

        cid = dev_name.replace(pool.replace('pool', ''), '')
        try:
            # self.client.inspect_container(cid)
            self.docker_client.inspect(cid)
        except docker.errors.APIError:
            raise MountError('Failed to associate device {0} mounted at {1} '
                             'with any container.'.format(dev_name,
                                                          self.mountpoint))

        Mount.unmount_path(self.mountpoint)
        cinfo = self.docker_client.inspect(cid)

        # Was the container live mounted? If so, done.
        # TODO: Container.Config.Env should be {} (iterable) not None.
        #       Fix in docker-py.
        env = cinfo['Config']['Env']
        if (env and '_RHAI_TEMP_CONTAINER' not in env) or not env:
            return

        Mount._remove_thin_device(dev_name)
        self._cleanup_container(cinfo)

    def _get_overlay_mount_cid(self):
        """
        Returns the cid of the container mounted at mountpoint.
        """
        cmd = ['findmnt', '-o', 'OPTIONS', '-n', self.mountpoint]
        r = util.subp(cmd)
        if r.return_code != 0:
            raise MountError('No devices mounted at that location.')
        optstring = r.stdout.strip().split('\n')[-1]
        upperdir = [o.replace('upperdir=', '') for o in optstring.split(',')
                    if o.startswith('upperdir=')][0]
        cdir = upperdir.rsplit('/', 1)[0]
        if not cdir.startswith('/var/lib/docker/overlay/'):
            raise MountError('The device mounted at that location is not a '
                             'docker container.')
        return cdir.replace('/var/lib/docker/overlay/', '')

    def _unmount_overlay(self):
        """
        OverlayFS unmount backend.
        """
        if Mount.get_dev_at_mountpoint(self.mountpoint) != 'overlay':
            raise MountError('Device mounted at {} is not an atomic mount.'
                             ''.format(self.mountpoint))
        cid = self._get_overlay_mount_cid()
        Mount.unmount_path(self.mountpoint)
        self._cleanup_container(self.docker_client.inspect(cid))
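
A minimal usage sketch of the DockerMount class above; the module path and the image name are assumptions, and mounting requires root privileges and a local Docker daemon:

# Illustrative only; not taken from the projects listed above.
import tempfile

from docker_mount import DockerMount, MountError  # assumed import path

mountpoint = tempfile.mkdtemp()
dm = DockerMount(mountpoint, mnt_mkdir=True)
try:
    # mount() dispatches on the daemon's 'Storage Driver' and returns the
    # directory where the image/container filesystem was mounted.
    path = dm.mount('fedora')  # assumed image name
    print 'mounted at ' + path
    dm.unmount()
except MountError as e:
    print 'mount failed: ' + str(e)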
Example #11
class DockerMount(Mount):
    """
    A class which can be used to mount and unmount docker containers and
    images on a filesystem location.

    mnt_mkdir = Create temporary directories based on the cid at mountpoint
                for mounting containers
    """
    def __init__(self, mountpoint, live=False, mnt_mkdir=False):
        Mount.__init__(self, mountpoint, live)
        self.client = docker.Client()
        self.docker_client = DockerClient()
        self.mnt_mkdir = mnt_mkdir

    def _create_temp_container(self, iid):
        """
        Create a temporary container from a given iid.

        Temporary containers are marked with a sentinel environment
        variable so that they can be cleaned on unmount.
        """
        try:
            return self.docker_client.create_container(iid)
        except docker.errors.APIError as ex:
            raise MountError('Error creating temporary container:\n' + str(ex))

    def _clone(self, cid):
        """
        Create a temporary image snapshot from a given cid.

        Temporary image snapshots are marked with a sentinel label
        so that they can be cleaned on unmount.
        """
        try:
            iid = self.docker_client.commit(cid)
        except docker.errors.APIError as ex:
            raise MountError(str(ex))
        return self._create_temp_container(iid)

    def _identifier_as_cid(self, identifier):
        """
        Returns a container uuid for identifier.

        If identifier is an image UUID or image tag, create a temporary
        container and return its uuid.
        """

        if self.docker_client.is_a_container(identifier):
            if self.live:
                return identifier
            else:
                return self._clone(identifier)
        elif self.docker_client.is_an_image(identifier):
            return self._create_temp_container(identifier)
        else:
            raise MountError('{} did not match any image or container.'
                             ''.format(identifier))

    @staticmethod
    def _no_gd_api_dm(cid):
        # TODO: Deprecated
        desc_file = os.path.join('/var/lib/docker/devicemapper/metadata', cid)
        desc = json.loads(open(desc_file).read())
        return desc['device_id'], desc['size']

    @staticmethod
    def _no_gd_api_overlay(cid):
        # TODO: Deprecated
        prefix = os.path.join('/var/lib/docker/overlay/', cid)
        ld_metafile = open(os.path.join(prefix, 'lower-id'))
        ld_loc = os.path.join('/var/lib/docker/overlay/', ld_metafile.read())
        return (os.path.join(ld_loc, 'root'), os.path.join(prefix, 'upper'),
                os.path.join(prefix, 'work'))

    def mount(self, identifier, options=[]):
        """
        Mounts a container or image referred to by identifier to
        the host filesystem.
        """
        driver = self.docker_client.info()['Storage Driver']
        driver_mount_fn = getattr(self, "_mount_" + driver,
                                  self._unsupported_backend)
        driver_mount_fn(identifier, options)

        # Return mount path so it can be later unmounted by path
        return self.mountpoint

    def _unsupported_backend(self, identifier='', options=[]):
        # raise MountError('Atomic mount is not supported on the {} docker '
        #                  'storage backend.'
        #                  ''.format(self.client.info()['Driver']))
        driver = self.docker_client.info()['Storage Driver']
        raise MountError('Atomic mount is not supported on the {} docker '
                         'storage backend.'
                         ''.format(driver))

    def _mount_devicemapper(self, identifier, options):
        """
        Devicemapper mount backend.
        """
        if os.geteuid() != 0:
            raise MountError('Insufficient privileges to mount device.')

        if self.live and options:
            raise MountError('Cannot set mount options for live container '
                             'mount.')

        # info = self.client.info()
        info = self.docker_client.info()

        cid = self._identifier_as_cid(identifier)

        if self.mnt_mkdir:
            # If the given mount_path is just a parent dir for where
            # to mount things by cid, then the new mountpoint is the
            # mount_path plus the first 20 chars of the cid
            self.mountpoint = os.path.join(self.mountpoint, cid[:20])
            try:
                os.mkdir(self.mountpoint)
            except Exception as e:
                raise MountError(e)

        # cinfo = self.client.inspect_container(cid)
        cinfo = self.docker_client.inspect(cid)

        if self.live and not cinfo['State']['Running']:
            self._cleanup_container(cinfo)
            raise MountError('Cannot live mount non-running container.')

        options = [] if self.live else ['ro', 'nosuid', 'nodev']

        dm_dev_name, dm_dev_id, dm_dev_size = '', '', ''
        # dm_pool = info['DriverStatus'][0][1]
        dm_pool = info['Pool Name']
        try:
            #FIXME, GraphDriver isn't in inspect container output
            dm_dev_name = cinfo['GraphDriver']['Data']['DeviceName']
            dm_dev_id = cinfo['GraphDriver']['Data']['DeviceId']
            dm_dev_size = cinfo['GraphDriver']['Data']['DeviceSize']
        except:
            # TODO: deprecated when GraphDriver patch makes it upstream
            dm_dev_id, dm_dev_size = DockerMount._no_gd_api_dm(cid)
            dm_dev_name = dm_pool.replace('pool', cid)

        dm_dev_path = os.path.join('/dev/mapper', dm_dev_name)
        # If the device isn't already there, activate it.
        if not os.path.exists(dm_dev_path):
            if self.live:
                raise MountError('Error: Attempted to live-mount unactivated '
                                 'device.')

            Mount._activate_thin_device(dm_dev_name, dm_dev_id, dm_dev_size,
                                        dm_pool)

        # XFS should get nosuid
        fstype = Mount._get_fs(dm_dev_path)
        if fstype.upper() == 'XFS' and 'suid' not in options:
            if 'nosuid' not in options:
                options.append('nosuid')
        try:
            Mount.mount_path(dm_dev_path,
                             self.mountpoint,
                             optstring=(','.join(options)))
        except MountError as de:
            if not self.live:
                Mount._remove_thin_device(dm_dev_name)
            self._cleanup_container(cinfo)
            raise de

    def _mount_overlay(self, identifier, options):
        """
        OverlayFS mount backend.
        """
        if os.geteuid() != 0:
            raise MountError('Insufficient privileges to mount device.')

        if self.live:
            raise MountError('The OverlayFS backend does not support live '
                             'mounts.')
        elif 'rw' in options:
            raise MountError('The OverlayFS backend does not support '
                             'writeable mounts.')

        cid = self._identifier_as_cid(identifier)
        # cinfo = self.client.inspect_container(cid)
        cinfo = self.docker_client.inspect(cid)

        ld, ud, wd = '', '', ''
        try:
            #FIXME, GraphDriver isn't in inspect container output
            ld = cinfo['GraphDriver']['Data']['lowerDir']
            ud = cinfo['GraphDriver']['Data']['upperDir']
            wd = cinfo['GraphDriver']['Data']['workDir']
        except:
            ld, ud, wd = DockerMount._no_gd_api_overlay(cid)

        options += ['ro', 'lowerdir=' + ld, 'upperdir=' + ud, 'workdir=' + wd]
        optstring = ','.join(options)
        cmd = [
            'mount', '-t', 'overlay', '-o', optstring, 'overlay',
            self.mountpoint
        ]
        status = util.subp(cmd)

        if status.return_code != 0:
            self._cleanup_container(cinfo)
            raise MountError('Failed to mount OverlayFS device.\n' +
                             status.stderr.decode(sys.getdefaultencoding()))

    def _cleanup_container(self, cinfo):
        """
        Remove a container and clean up its image if necessary.
        """
        # I'm not a fan of doing this again here.
        env = cinfo['Config']['Env']
        if (env and '_RHAI_TEMP_CONTAINER' not in env) or not env:
            return

        iid = cinfo['Image']

        # self.client.remove_container(cinfo['Id'])
        self.docker_client.remove_container(cinfo['Id'])

        info = self.docker_client.inspect(iid)

        ##FIXME info['Config'] will be a null value and cause an exception if not RHEL based....
        try:
            if info['Config']:
                if '_RHAI_TEMP_CONTAINER=True' in info['Config']['Env']:
                    #FIXME THIS IS BROKEN
                    self.docker_client.remove_image(iid)
        except:
            pass

        # If we are creating temporary dirs for mount points
        # based on the cid, then we should rmdir them while
        # cleaning up.
        if self.mnt_mkdir:
            try:
                os.rmdir(self.mountpoint)
            except Exception as e:
                raise MountError(e)

    def unmount(self):
        """
        Unmounts and cleans-up after a previous mount().
        """
        # driver = self.client.info()['Driver']
        driver = self.docker_client.info()['Storage Driver']
        driver_unmount_fn = getattr(self, "_unmount_" + driver,
                                    self._unsupported_backend)
        driver_unmount_fn()

    def _unmount_devicemapper(self):
        """
        Devicemapper unmount backend.
        """
        # pool = self.client.info()['DriverStatus'][0][1]
        pool = self.docker_client.info()['Pool Name']
        dev = Mount.get_dev_at_mountpoint(self.mountpoint)

        dev_name = dev.replace('/dev/mapper/', '')
        if not dev_name.startswith(pool.rsplit('-', 1)[0]):
            raise MountError('Device mounted at {} is not a docker container.'
                             ''.format(self.mountpoint))

        cid = dev_name.replace(pool.replace('pool', ''), '')
        try:
            # self.client.inspect_container(cid)
            self.docker_client.inspect(cid)
        except docker.errors.APIError:
            raise MountError('Failed to associate device {0} mounted at {1} '
                             'with any container.'.format(
                                 dev_name, self.mountpoint))

        Mount.unmount_path(self.mountpoint)
        cinfo = self.docker_client.inspect(cid)

        # Was the container live mounted? If so, done.
        # TODO: Container.Config.Env should be {} (iterable) not None.
        #       Fix in docker-py.
        env = cinfo['Config']['Env']
        if (env and '_RHAI_TEMP_CONTAINER' not in env) or not env:
            return

        Mount._remove_thin_device(dev_name)
        self._cleanup_container(cinfo)

    def _get_overlay_mount_cid(self):
        """
        Returns the cid of the container mounted at mountpoint.
        """
        cmd = ['findmnt', '-o', 'OPTIONS', '-n', self.mountpoint]
        r = util.subp(cmd)
        if r.return_code != 0:
            raise MountError('No devices mounted at that location.')
        optstring = r.stdout.strip().split('\n')[-1]
        upperdir = [
            o.replace('upperdir=', '') for o in optstring.split(',')
            if o.startswith('upperdir=')
        ][0]
        cdir = upperdir.rsplit('/', 1)[0]
        if not cdir.startswith('/var/lib/docker/overlay/'):
            raise MountError('The device mounted at that location is not a '
                             'docker container.')
        return cdir.replace('/var/lib/docker/overlay/', '')

    def _unmount_overlay(self):
        """
        OverlayFS unmount backend.
        """
        if Mount.get_dev_at_mountpoint(self.mountpoint) != 'overlay':
            raise MountError('Device mounted at {} is not an atomic mount.'
                             ''.format(self.mountpoint))
        cid = self._get_overlay_mount_cid()
        Mount.unmount_path(self.mountpoint)
        self._cleanup_container(self.docker_client.inspect(cid))
Example #12
from flask import Flask, request, abort, jsonify
from docker_client import DockerClient

import json


app = Flask(__name__)

docker = DockerClient()


@app.route("/docker/image/pull", methods=["POST"])
def image_pull():
    try:
        docker_img = json.loads(request.data).get("docker_img")
        image_tag = json.loads(request.data).get("image_tag")

        image = docker.pull_docker_image(docker_img, image_tag)
        return jsonify({
            "message": "the image: {}:{} has been updated".format(docker_img, image_tag),
            "image_id": image.id
        })

    except Exception as ex:
        print(ex)
        abort(status=500)


@app.route("/docker/container/update", methods=["POST"])
def update_container():
    try:
Example #13
def main():
    parser = argparse.ArgumentParser(description='CodaLab worker.')
    parser.add_argument('--tag',
                        help='Tag that allows for scheduling runs on specific '
                        'workers.')
    parser.add_argument(
        '--server',
        default='https://worksheets.codalab.org',
        help='URL of the CodaLab server, in the format '
        '<http|https>://<hostname>[:<port>] (e.g., https://worksheets.codalab.org)'
    )
    parser.add_argument('--work-dir',
                        default='codalab-worker-scratch',
                        help='Directory where to store temporary bundle data, '
                        'including dependencies and the data from run '
                        'bundles.')
    parser.add_argument('--network-prefix',
                        default='codalab_worker_network',
                        help='Docker network name prefix')
    parser.add_argument(
        '--cpuset',
        type=str,
        metavar='CPUSET_STR',
        default='ALL',
        help='Comma-separated list of CPUs in which to allow bundle execution, '
        '(e.g., \"0,2,3\", \"1\").')
    parser.add_argument(
        '--gpuset',
        type=str,
        metavar='GPUSET_STR',
        default='ALL',
        help='Comma-separated list of GPUs in which to allow bundle execution '
        '(e.g., \"0,1\", \"1\").')
    parser.add_argument('--max-work-dir-size',
                        type=str,
                        metavar='SIZE',
                        default='10g',
                        help='Maximum size of the temporary bundle data '
                        '(e.g., 3, 3k, 3m, 3g, 3t).')
    parser.add_argument(
        '--max-dependencies-serialized-length',
        type=int,
        default=60000,
        help='Maximum length of serialized json of dependency list of worker '
        '(e.g., 50, 30000, 60000).')
    parser.add_argument(
        '--max-image-cache-size',
        type=str,
        metavar='SIZE',
        help='Limit the disk space used to cache Docker images '
        'for worker jobs to the specified amount (e.g. '
        '3, 3k, 3m, 3g, 3t). If the limit is exceeded, '
        'the least recently used images are removed first. '
        'Worker will not remove any images if this option '
        'is not specified.')
    parser.add_argument('--password-file',
                        help='Path to the file containing the username and '
                        'password for logging into the bundle service, '
                        'each on a separate line. If not specified, the '
                        'password is read from standard input.')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='Whether to output verbose log messages.')
    parser.add_argument('--id',
                        default='%s(%d)' % (socket.gethostname(), os.getpid()),
                        help='Internal use: ID to use for the worker.')
    parser.add_argument(
        '--shared-file-system',
        action='store_true',
        help='Internal use: Whether the file system containing '
        'bundle data is shared between the bundle service '
        'and the worker.')
    args = parser.parse_args()

    # Get the username and password.
    logger.info('Connecting to %s' % args.server)
    if args.password_file:
        if os.stat(args.password_file).st_mode & (stat.S_IRWXG | stat.S_IRWXO):
            print >> sys.stderr, """
Permissions on password file are too lax.
Only the user should be allowed to access the file.
On Linux, run:
chmod 600 %s""" % args.password_file
            exit(1)
        with open(args.password_file) as f:
            username = f.readline().strip()
            password = f.readline().strip()
    else:
        username = os.environ.get('CODALAB_USERNAME')
        if username is None:
            username = raw_input('Username: ')
        password = os.environ.get('CODALAB_PASSWORD')
        if password is None:
            password = getpass.getpass()

    # Set up logging.
    if args.verbose:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            level=logging.INFO)

    max_work_dir_size_bytes = parse_size(args.max_work_dir_size)
    if args.max_image_cache_size is None:
        max_images_bytes = None
    else:
        max_images_bytes = parse_size(args.max_image_cache_size)

    docker_client = DockerClient()

    # transform/verify cpuset and gpuset
    cpuset = parse_cpuset_args(args.cpuset)
    gpuset = parse_gpuset_args(docker_client, args.gpuset)

    worker = Worker(args.id, args.tag, args.work_dir, cpuset, gpuset,
                    max_work_dir_size_bytes,
                    args.max_dependencies_serialized_length, max_images_bytes,
                    args.shared_file_system,
                    BundleServiceClient(args.server, username, password),
                    docker_client, args.network_prefix)

    # Register a signal handler to ensure safe shutdown.
    for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP]:
        signal.signal(sig, lambda signup, frame: worker.signal())

    # BEGIN: DO NOT CHANGE THIS LINE UNLESS YOU KNOW WHAT YOU ARE DOING
    # THIS IS HERE TO KEEP TEST-CLI FROM HANGING
    print('Worker started.')
    # END

    worker.run()