Example 1
class MesosScheduler(Scheduler):
    def __init__(self, manager, master, options):
        Scheduler.__init__(self, manager)
        self.master = master
        self.cpus = options.cpus
        self.mem = parse_mem(options.mem)
        self.gpus = options.gpus
        self.task_per_node = options.parallel or multiprocessing.cpu_count()
        self.options = options
        self.group = options.group
        self.last_finish_time = 0
        self.executor = None
        self.driver = None
        self.lock = threading.RLock()
        self.task_waiting = []
        self.task_launched = {}
        self.slaveTasks = {}
        self.starting = False
        # initialize state that is read elsewhere before being assigned,
        # in case the Scheduler base class does not define it
        self.started = False
        self.taskNum = 0

    def start_driver(self):
        name = 'OpenCluster'
        if self.options.name:
            name = "%s-%s" % (name,self.options.name)
        else:
            name = "%s-%s" % (name,datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"))

        if len(name) > 256:
            name = name[:253] + '...'  # truncate so the suffix keeps the name within 256 chars

        framework = mesos_pb2.FrameworkInfo()
        framework.user = getuser()
        if framework.user == 'root':
            raise Exception("OpenCluster is not allowed to run as 'root'")
        framework.name = name
        framework.hostname = socket.gethostname()

        self.driver = MesosSchedulerDriver(self, framework, self.master)
        self.driver.start()
        logger.debug("Mesos Scheudler driver started")

        self.shuttingdown = False
        self.last_finish_time = time.time()
        self.stopped = False
        #
        # def check():
        #     while self.started:
        #         now = time.time()
        #         if not self.task_waiting and now - self.last_finish_time > MAX_IDLE_TIME:
        #             logger.info("stop mesos scheduler after %d seconds idle", now - self.last_finish_time)
        #             self.shutdown()
        #             break
        #         time.sleep(1)
        #
        #         if len(self.task_success()) + len(self.task_failed) == self.taskNum:
        #             self.shutdown()
        # spawn(check)

    @safe
    def registered(self, driver, frameworkId, masterInfo):
        self.started = True
        logger.debug("connect to master %s:%s(%s), registered as %s",
            int2ip(masterInfo.ip), masterInfo.port, masterInfo.id,
            frameworkId.value)
        self.executor = self.getExecutorInfo(str(frameworkId.value))

    @safe
    def reregistered(self, driver, masterInfo):
        logger.warning("re-connect to mesos master %s:%s(%s)",
            int2ip(masterInfo.ip), masterInfo.port, masterInfo.id)

    @safe
    def disconnected(self, driver):
        logger.debug("framework is disconnected")

    @safe
    def getExecutorInfo(self, framework_id):
        execInfo = mesos_pb2.ExecutorInfo()
        execInfo.executor_id.value = "multiframework"
        execInfo.command.value = '%s %s' % (
            sys.executable, # /usr/bin/python.exe or .../python
            os.path.abspath(os.path.join(os.path.dirname(__file__), 'simpleexecutor.py'))
        )
        v = execInfo.command.environment.variables.add()
        v.name = 'UID'
        v.value = str(os.getuid())
        v = execInfo.command.environment.variables.add()
        v.name = 'GID'
        v.value = str(os.getgid())

        if hasattr(execInfo, 'framework_id'):
            execInfo.framework_id.value = str(framework_id)

        script = os.path.realpath(sys.argv[0])
        if hasattr(execInfo, 'name'):
            execInfo.name = script

        execInfo.data = marshal.dumps((script, os.getcwd(), sys.path, dict(os.environ), self.task_per_node, env.environ))

        return execInfo

    @safe
    def clearCache(self):
        self.task_launched.clear()
        self.slaveTasks.clear()

    @safe
    def submitTasks(self, tasks):
        if not tasks:
            return
        self.completionEvents.join()  # block until all queued completion events have been processed
        self.clearCache()
        self.task_waiting.extend(tasks)
        self.taskNum += len(tasks)
        logger.debug("Got job with %d tasks", len(tasks))

        if not self.started and not self.starting:
            self.starting = True
            self.start_driver()
        while not self.started:
            self.lock.release()
            time.sleep(0.01)
            self.lock.acquire()

        self.requestMoreResources()
        self.manager.statusUpdate()

    def requestMoreResources(self):
        if self.started:
            self.driver.reviveOffers()

    @safe
    def resourceOffers(self, driver, offers):

        rf = mesos_pb2.Filters()
        if not self.task_waiting:
            rf.refuse_seconds = 5
            for o in offers:
                driver.launchTasks(o.id, [], rf)
            return

        random.shuffle(offers)
        self.last_offer_time = time.time()
        for offer in offers:
            if self.shuttingdown:
                print "Shutting down: declining offer on [%s]" % offer.hostname
                driver.declineOffer(offer.id)
                continue

            attrs = self.getAttributes(offer)
            if self.options.group and attrs.get('group', 'None') not in self.options.group:
                driver.launchTasks(offer.id, [], rf)
                continue

            cpus, mem, gpus = self.getResources(offer)
            logger.debug("got resource offer %s: cpus:%s, mem:%s, gpus:%s at %s", offer.id.value, cpus, mem, gpus, offer.hostname)
            logger.debug("attributes,gpus:%s",attrs.get('gpus', None))
            sid = offer.slave_id.value
            tasks = []
            while (len(self.task_waiting) > 0 and cpus >= self.cpus and mem >= self.mem
                   and (self.gpus == 0 or attrs.get('gpus', None) is not None)):

                logger.debug("Accepting resource on slave %s (%s)", offer.slave_id.value, offer.hostname)
                t = self.task_waiting.pop()
                t.state = mesos_pb2.TASK_STARTING
                t.state_time = time.time()

                task = self.create_task(offer, t, cpus)
                tasks.append(task)

                self.task_launched[t.id] = t
                self.slaveTasks.setdefault(sid, set()).add(t.id)

                cpus -= self.cpus
                mem -= self.mem
                # gpus -= self.gpus

            operation = mesos_pb2.Offer.Operation()
            operation.type = mesos_pb2.Offer.Operation.LAUNCH
            operation.launch.task_infos.extend(tasks)
            driver.acceptOffers([offer.id], [operation])

    @safe
    def offerRescinded(self, driver, offer_id):
        logger.debug("rescinded offer: %s", offer_id)
        if self.task_waiting:
            self.requestMoreResources()

    def getResources(self, offer):
        cpus, mem, gpus = 0, 0, 0
        for r in offer.resources:
            if r.name == 'gpus':
                gpus = float(r.scalar.value)
            elif r.name == 'cpus':
                cpus = float(r.scalar.value)
            elif r.name == 'mem':
                mem = float(r.scalar.value)
        return cpus, mem, gpus

    def getResource(self, res, name):
        for r in res:
            if r.name == name:
                return r.scalar.value
        return 0

    def getAttribute(self, attrs, name):
        for r in attrs:
            if r.name == name:
                return r.scalar.value

    def getAttributes(self, offer):
        attrs = {}
        for a in offer.attributes:
            attrs[a.name] = a.scalar.value
        return attrs

    def create_task(self, offer, t, cpus):
        task = mesos_pb2.TaskInfo()

        task.task_id.value = t.id
        task.slave_id.value = offer.slave_id.value
        task.name = "task(%s/%d)" % (t.id, self.taskNum)
        task.executor.MergeFrom(self.executor)

        task.data = compress(cPickle.dumps((t, t.tried), -1))

        cpu = task.resources.add()
        cpu.name = "cpus"
        cpu.type = mesos_pb2.Value.SCALAR
        cpu.scalar.value = min(self.cpus, cpus)

        mem = task.resources.add()
        mem.name = "mem"
        mem.type = mesos_pb2.Value.SCALAR
        mem.scalar.value = self.mem
        #
        # gpu = task.resources.add()
        # gpu.name = "gpus"
        # gpu.type = 0 # mesos_pb2.Value.SCALAR
        # gpu.scalar.value = self.gpus

        return task

    @safe
    def statusUpdate(self, driver, update):
        logger.debug("Task %s in state [%s]" % (update.task_id.value, mesos_pb2.TaskState.Name(update.state)))
        tid = str(update.task_id.value)

        if tid not in self.task_launched:
            # check failed after launched
            for t in self.task_waiting:
                if t.id == tid:
                    self.task_launched[tid] = t
                    self.task_waiting.remove(t)
                    break
            else:
                logger.debug("Task %s is finished, ignore it", tid)
                return

        t = self.task_launched[tid]
        t.state = update.state
        t.state_time = time.time()
        self.last_finish_time = t.state_time

        if update.state == mesos_pb2.TASK_RUNNING:
            self.started = True
            # to do task timeout handler
        elif update.state == mesos_pb2.TASK_LOST:
            self.task_launched.pop(tid)

            if t.tried < self.options.retry:
                t.tried += 1
                logger.warning("task %s lost, retry %s", t.id, update.state, t.tried)
                self.task_waiting.append(t) # try again
            else:
                self.taskEnded(t, OtherFailure("task lost,exception:" + str(update.data)), "task lost")

        elif update.state in (mesos_pb2.TASK_FINISHED, mesos_pb2.TASK_FAILED, mesos_pb2.TASK_ERROR, mesos_pb2.TASK_KILLED):
            self.task_launched.pop(tid)

            slave = None
            for s in self.slaveTasks:
                if tid in self.slaveTasks[s]:
                    slave = s
                    self.slaveTasks[s].remove(tid)
                    break

            if update.state == mesos_pb2.TASK_FINISHED:
                self.taskEnded(t, Success(), update.data)

            if update.state == mesos_pb2.TASK_ERROR:
                logger.error(update.message)
                self.taskEnded(t, OtherFailure(update.message), update.message)
                driver.abort()
                self.shutdown()

            # TASK_LOST is handled above, so only FAILED and KILLED can reach here
            if update.state in (mesos_pb2.TASK_FAILED, mesos_pb2.TASK_KILLED):
                if t.tried < self.options.retry:
                    t.tried += 1
                    logger.warning("task %s failed with %s, retry %s", t.id, update.state, t.tried)
                    self.task_waiting.append(t) # try again
                else:
                    self.taskEnded(t, OtherFailure("exception: " + str(update.data)), None)
                    logger.error("task %s failed on %s", t.id, slave)

        if not self.task_waiting:
            self.requestMoreResources() # request more offers again

    @safe
    def check(self, driver):
        now = time.time()
        for tid, t in self.task_launched.items():
            if t.state == mesos_pb2.TASK_STARTING and t.state_time + 30 < now:
                logger.warning("task %s lauched failed, assign again", tid)
                if not self.task_waiting:
                    self.requestMoreResources()
                t.tried += 1
                t.state = -1
                self.task_launched.pop(tid)
                self.task_waiting.append(t)
            # TODO: check run time

    @safe
    def shutdown(self):
        if not self.started:
            return

        wait_started = datetime.datetime.now()
        while (len(self.task_launched) > 0) and \
          (SHUTDOWN_TIMEOUT > (datetime.datetime.now() - wait_started).seconds):
            time.sleep(1)

        logger.debug("total:%d, task finished: %d,task failed: %d", self.taskNum, self.finished_count, self.fail_count)

        self.shuttingdown = True
        # self.driver.join()
        self.driver.stop(False)

        #self.driver = None
        logger.debug("scheduler stop!!!")
        self.stopped = True
        self.started = False

    @safe
    def error(self, driver, code):
        logger.warning("Mesos error message: %s", code)

    def defaultParallelism(self):
        return 16

    def frameworkMessage(self, driver, executor, slave, data):
        logger.warning("[slave %s] %s", slave.value, data)

    def executorLost(self, driver, executorId, slaveId, status):
        logger.warning("executor at %s %s lost: %s", slaveId.value, executorId.value, status)
        self.slaveTasks.pop(slaveId.value, None)

    def slaveLost(self, driver, slaveId):
        logger.warning("slave %s lost", slaveId.value)
        self.slaveTasks.pop(slaveId.value, None)

    def killTask(self, job_id, task_id, tried):
        tid = mesos_pb2.TaskID()
        tid.value = "%s:%s:%s" % (job_id, task_id, tried)
        self.driver.killTask(tid)
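
The constructor above calls a parse_mem helper that is not shown in this example. As an illustrative sketch only (an assumption about its contract, not the original OpenCluster implementation), it would turn a memory spec such as '512m' or '2g' into megabytes:

import re

def parse_mem(mem):
    # Hypothetical helper: accept a bare number or a '512m' / '2g' style
    # string and return the amount in MB. Not the original implementation.
    if isinstance(mem, (int, float)):
        return float(mem)
    m = re.match(r'^\s*([\d.]+)\s*([mMgG]?)\s*$', str(mem))
    if not m:
        raise ValueError("cannot parse memory spec: %r" % (mem,))
    value, unit = float(m.group(1)), m.group(2).lower()
    return value * 1024 if unit == 'g' else value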
Example 2
def start_factory_mesos():
    global pyroLoopCondition
    parser = OptionParser(
        usage="Usage: python factorymesos.py [options] <command>")
    parser.allow_interspersed_args = False
    parser.add_option("-s",
                      "--master",
                      type="string",
                      default="",
                      help="url of master (mesos://172.31.252.180:5050)")
    parser.add_option("-f",
                      "--factory",
                      type="string",
                      default="",
                      help="host:port of master (172.31.252.180:6666)")
    parser.add_option(
        "-w",
        "--warehouse_addr",
        type="string",
        default="",
        help=
        "kafka-172.31.252.182:9092|mysql-172.31.254.25:3306,db,username,password"
    )
    parser.add_option("-p",
                      "--task_per_node",
                      type="int",
                      default=0,
                      help="max number of tasks on one node (default: 0)")
    parser.add_option("-I",
                      "--image",
                      type="string",
                      help="image name for Docker")
    parser.add_option("-V",
                      "--volumes",
                      type="string",
                      help="volumes to mount into Docker")
    parser.add_option("-r",
                      "--retry",
                      type="int",
                      default=0,
                      help="retry times when failed (default: 0)")
    parser.add_option(
        "-e",
        "--config",
        type="string",
        default="/work/opencluster/config.ini",
        help=
        "absolute path of configuration file (default: /work/opencluster/config.ini)"
    )

    parser.add_option("-g",
                      "--group",
                      type="string",
                      default='',
                      help="which group to run (default: ''")
    parser.add_option(
        "-q",
        "--quiet",
        action="store_true",
        help="be quiet",
    )
    parser.add_option(
        "-v",
        "--verbose",
        action="store_true",
        help="show more useful log",
    )

    (options, command) = parser.parse_args()

    # optparse always returns a truthy options object, so guard on the
    # positional <command> argument instead
    if not command:
        parser.print_help()
        sys.exit(2)

    if options.config:
        Conf.setConfigFile(options.config)

    options.master = options.master or Conf.getMesosMaster()
    options.warehouse_addr = options.warehouse_addr or Conf.getWareHouseAddr()

    servers = options.factory or Conf.getFactoryServers()
    servs = servers.split(",")
    server = servs[0].split(":")

    options.logLevel = (options.quiet and logging.ERROR
                        or options.verbose and logging.DEBUG or logging.INFO)
    setLogger(Conf.getFactoryServiceName(), "MESOS", options.logLevel)

    implicitAcknowledgements = 1
    if os.getenv("MESOS_EXPLICIT_ACKNOWLEDGEMENTS"):
        implicitAcknowledgements = 0
    sched = FactoryMesos(options, command, implicitAcknowledgements)

    driver = MesosSchedulerDriver(sched, sched.framework, options.master,
                                  implicitAcknowledgements)
    driver.start()
    logger.debug("Mesos Scheudler driver started")

    warehouse_addrs = options.warehouse_addr.split(",")

    def fetchTasksFromMySQL():
        global pyroLoopCondition
        mysqlIpAndPort = warehouse_addrs[0].split(":")
        last_data_time = time.time()

        while pyroLoopCondition:
            db = MySQLdb.connect(host=mysqlIpAndPort[0],
                                 port=int(mysqlIpAndPort[1]),
                                 db=warehouse_addrs[1],
                                 user=warehouse_addrs[2],
                                 passwd=warehouse_addrs[3])
            try:
                cur = db.cursor()
                curUpt = db.cursor()
                dataResults = cur.execute(
                    "select task_id,task_desc,task_start_time,status from t_task where status=0 order by priority asc limit 200"
                )
                results = cur.fetchmany(dataResults)
                for r in results:
                    sched.append_task(cPickle.loads(r[1]))
                    curUpt.execute(
                        "update t_task set task_start_time=now(),status=1 "
                        "where task_id=%s", (r[0], ))
                if len(results) > 0:
                    db.commit()
                    last_data_time = time.time()
                    driver.reviveOffers()

                if sched.tasks_total_len() > MAX_WAITING_TASK:
                    time.sleep(2)
                if time.time() - last_data_time > MAX_EMPTY_TASK_PERIOD:
                    time.sleep(10)

                if cur:
                    cur.close()
                if curUpt:
                    curUpt.close()
            finally:
                db.close()

    def fetchTasksFromKafka(priority):
        global pyroLoopCondition

        consumer = KafkaConsumer('OpenCluster%s' % priority,
                                 bootstrap_servers=[options.warehouse_addr],
                                 group_id="cnlab",
                                 auto_commit_enable=True,
                                 auto_commit_interval_ms=30 * 1000,
                                 auto_offset_reset='smallest')

        last_data_time = time.time()
        while pyroLoopCondition:
            for message in consumer.fetch_messages():
                logger.error("%s:%s:%s: key=%s " %
                             (message.topic, message.partition, message.offset,
                              message.key))
                sched.append_task(cPickle.loads(message.value))
                consumer.task_done(message)
                last_data_time = time.time()
            if sched.tasks_len(priority) > MAX_WAITING_TASK:
                time.sleep(2)
            if time.time() - last_data_time > MAX_EMPTY_TASK_PERIOD:
                time.sleep(10)

    if len(warehouse_addrs) > 2:
        spawn(fetchTasksFromMySQL)
    else:
        for i in range(1, sched.priority_size + 1):
            spawn(fetchTasksFromKafka, i)

    def handler(signm, frame):
        logger.warning("got signal %d, exit now", signm)
        sched.stop(3)

    signal.signal(signal.SIGTERM, handler)
    signal.signal(signal.SIGABRT, handler)

    try:
        while not sched.stopped:
            time.sleep(0.5)
            sched.check(driver)

            now = time.time()
            if now > sched.last_offer_time + 60 + random.randint(0, 5):
                logger.warning("too long to get offer, reviving...")
                sched.last_offer_time = now
                driver.reviveOffers()

    except KeyboardInterrupt:
        logger.warning(
            'stopped by KeyboardInterrupt, exiting gracefully; please wait...'
        )
        sched.stop(4)

    # signal the background fetch threads (gated on pyroLoopCondition) to exit
    pyroLoopCondition = False

    time.sleep(5)
    driver.stop(False)
    sys.exit(sched.status)
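
Example 2 hands its fetch loops to a spawn helper whose definition is not shown. A minimal stand-in, assuming it simply runs the function on a daemon thread, could look like this:

import threading

def spawn(target, *args):
    # Hypothetical stand-in for the spawn() used above: run target(*args)
    # on a daemon thread so it exits together with the main process.
    t = threading.Thread(target=target, args=args)
    t.daemon = True
    t.start()
    return t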
Example 3
def main():

    global shutdown
    global accept_offers
    global driver

    cfg = get_configuration()

    # Configure logging
    setup_logging(cfg)

    framework = mesos_framework(cfg)
    #credentials = mesos_credentials(cfg)

    mesos_scheduler = OurJobScheduler(cfg)
    #driver = MesosSchedulerDriver(mesos_scheduler, framework,
    #                              cfg.mesos.master, cfg.mesos.imp_ack,
    #                              credentials)
    driver = MesosSchedulerDriver(mesos_scheduler, framework, cfg.mesos.master,
                                  cfg.mesos.imp_ack)

    shutdown = Shutdown()
    accept_offers = AcceptOffers()

    # driver.run() blocks, so run it in a separate thread.
    def run_driver_async():
        status = 0 if driver.run() == MesosPb2.DRIVER_STOPPED else 1

        if cfg.debug > 0:
            logger.debug('Stopping Driver')
        driver.stop()

        logger.info('Terminating Framework')
        sys.exit(status)

    framework_thread = Thread(target=run_driver_async, args=())
    framework_thread.start()

    logger.info('Beginning Processing')

    while framework_thread.is_alive():
        # If a shutdown has been requested, suppress offers and wait for the
        # framework thread to complete.
        if shutdown.flag:
            if cfg.debug > 0:
                logger.debug('Suppressing Offers')
            driver.suppressOffers()

            while framework_thread.is_alive():
                logger.debug('Child Thread Still Alive')
                sleep(5)

            break

        # If the max number of jobs are already running, suppress offers and
        # wait for some jobs to finish.
        if mesos_scheduler.tasks_launched == cfg.mesos.max_jobs:
            driver.suppressOffers()

            if cfg.debug > 0:
                logger.debug('Suppressing Offers')

            # Sleep until we have room for more tasks
            while (not shutdown.flag
                   and mesos_scheduler.tasks_launched == cfg.mesos.max_jobs):
                if cfg.debug > 0:
                    logger.debug('Waiting for more available tasks')
                sleep(5)

            # Sleep until more processing is requested
            while not shutdown.flag and not mesos_scheduler.have_work():
                if cfg.debug > 0:
                    logger.debug('Waiting for more work')
                sleep(5)

            if not shutdown.flag:
                if cfg.debug > 0:
                    logger.debug('Reviving Offers')
                driver.reviveOffers()

        # If there's no new work to be done, suppress offers until we have
        # more work
        if not shutdown.flag and not mesos_scheduler.have_work():
            driver.suppressOffers()

            # Sleep until more processing is requested
            while not shutdown.flag and not mesos_scheduler.have_work():
                if cfg.debug > 0:
                    logger.debug('Waiting for more work')
                sleep(5)

            if not shutdown.flag:
                if cfg.debug > 0:
                    logger.debug('Reviving Offers')
                driver.reviveOffers()

        # Sleep for a second, so that we are not flying through the loop
        sleep(1)

    logger.info('Terminated Processing')
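
The Shutdown and AcceptOffers helpers used by main() are defined elsewhere. As a sketch of the contract the loop above relies on (an assumption, not the original class), Shutdown only needs to expose a flag that flips when a termination signal arrives:

import signal

class Shutdown(object):
    # Hypothetical sketch: a flag flipped by SIGINT/SIGTERM so the main
    # loop can suppress offers, drain running work, and exit cleanly.
    def __init__(self):
        self.flag = False
        signal.signal(signal.SIGINT, self._handler)
        signal.signal(signal.SIGTERM, self._handler)

    def _handler(self, signum, frame):
        self.flag = True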