class HandCockpit:
	"""Maps gamepad stick input to the four servo axes of a robot arm.

	`controller` must provide apply(positions) and getIndexes();
	`pad` is a PS3PadPygame-like gamepad wrapper.
	"""

	# Index into each RANGE entry: (min, max).
	MIN_VALUE = 0
	MAX_VALUE = 1
	# PRESET index used as the power-on position.
	INITIAL_SET = 0

	# Interval of the periodic axis-scan timer (seconds).
	SCAN_INTERVAL_SEC = 0.1

	# Indexes of the controlled parts.
	WAIST = 0  # base, rotates left/right
	BOOM = 1  # section rising from the base; left servo
	ARM = 2  # section between boom and craw; right servo
	CRAW = 3  # claw at the tip (the "bucket")

	# Physical range of motion [min, max]; values must stay within 0-180.
	# Based on this sample:
	# see: https://www.mearm.com/blogs/news/74739717-mearm-on-the-raspberry-pi-work-in-progress
	RANGE = (
		(0, 180), # waist
		(60, 165), # boom
		(40, 180), # arm
		(60, 180), # craw
	)

	# Controller sensitivity coefficients (degrees per second at full stick;
	# the sign flips the direction of travel).
	SENSITIVITY = (
		-50.0, # waist
		-50.0, # boom
		50.0, # arm
		50.0, # craw
	)

	# Predefined positions (WAIST, BOOM, ARM, CRAW), each within 0-180.
	PRESET = (
		(90, 112, 90, 60), # initial position
		(20, 30, 40, 50), # for the TOP button
	)

	def __init__(self, controller, pad):
		# initial servo position (copied so it can be mutated in place)
		self.position = list(HandCockpit.PRESET[HandCockpit.INITIAL_SET])
		# controller must expose apply() (and getIndexes())
		self.controller = controller
		self.pad = pad

		self.lastUpdateAt = time.time()
		self.isStartedAxisScanThread = False
		self.isEnabledDebugPad = True

		# push the initial position to the servos
		self._apply()

	def update(self, powerList, delay):
		"""Integrate stick power over `delay` seconds, clamp each axis to its
		physical RANGE, and forward the result to the controller."""
		for index in self.controller.getIndexes():
			self.position[index] += powerList[index] * delay * HandCockpit.SENSITIVITY[index]
			low = HandCockpit.RANGE[index][HandCockpit.MIN_VALUE]
			high = HandCockpit.RANGE[index][HandCockpit.MAX_VALUE]
			# clamp into the physically safe range
			self.position[index] = max(low, min(high, self.position[index]))
		self._apply()

	def usePreset(self, number):
		"""Move all axes to PRESET[number]."""
		self.position = list(HandCockpit.PRESET[number])
		self._apply()

	def _apply(self):
		# forward the current position to the servo controller
		self.controller.apply(self.position)

	def startAxisScanThread(self):
		"""Start polling the analog sticks on a timer instead of via events."""
		pygame.event.set_blocked(pygame.JOYAXISMOTION)
		self.axisScanThread = RepeatedTimer(HandCockpit.SCAN_INTERVAL_SEC, self.scanAxis)
		self.axisScanThread.start()
		self.isStartedAxisScanThread = True

	def stopAxisScanThread(self):
		"""Stop the axis-scan timer and re-enable pygame axis events."""
		if self.isStartedAxisScanThread:
			self.isStartedAxisScanThread = False
			self.axisScanThread.cancel()
			# NOTE(review): set_blocked(None) is meant to lift the
			# JOYAXISMOTION block set in startAxisScanThread — confirm
			# against the pygame.event documentation.
			pygame.event.set_blocked(None)

	def scanAxis(self):
		"""Read both analog sticks and feed them into update()."""
		now = time.time()
		delay = now - self.lastUpdateAt
		self.lastUpdateAt = now

		x1 = self.pad.getAnalog(PS3PadPygame.L3_AX)
		y1 = self.pad.getAnalog(PS3PadPygame.L3_AY)
		x2 = self.pad.getAnalog(PS3PadPygame.R3_AX)
		y2 = self.pad.getAnalog(PS3PadPygame.R3_AY)
		if self.isEnabledDebugPad:
			log = ('delay: %.3f, x1: %.3f, y1: %.3f, x2: %.3f, y2: %.3f' % (delay, x1, y1, x2, y2))
			print(log)

		# Backhoe control levers (JIS layout) are assigned as follows:
		# 	(left lever)			(right lever)
		#	arm extend				boom down
		# turn left o turn right	 bucket dig o bucket release
		#	arm curl				boom up
		# Substituting the claw for the bucket, reorder the sticks into
		# (WAIST, BOOM, ARM, CRAW).
		self.update((x1, y1, y2, x2), delay)

	def consumeEvents(self):
		"""Process pending pad events; return False when START requests exit."""
		events = self.pad.getEvents()
		for e in events:
			if e.type == pygame.locals.JOYBUTTONDOWN:
				if self.pad.isPressed(PS3PadPygame.START):
					print('start button pressed. exit.')
					return False
				elif self.pad.isPressed(PS3PadPygame.TOP):
					# pre-set 1
					print('top button pressed.')
					self.usePreset(1)
				elif self.pad.isPressed(PS3PadPygame.CIRCLE):
					self.switchDebugPad()
				elif self.pad.isPressed(PS3PadPygame.BOX):
					self.controller.switchDebugPosition()

			elif not self.isStartedAxisScanThread and e.type == pygame.locals.JOYAXISMOTION:
				# no scan timer running: react to axis events directly
				self.scanAxis()
		return True

	def switchDebugPad(self):
		"""Toggle logging of raw pad values in scanAxis()."""
		self.isEnabledDebugPad = not self.isEnabledDebugPad
class Scheduler(metaclass=ABCMeta):
    """Abstract FIFO scheduler that places application containers on a cluster.

    Periodically feeds observed cluster usage into a ComplementarityEstimation
    (leave-one-out updates) and, once the whole queue has completed, exports
    experiment metrics from InfluxDB to CSV. Subclasses implement
    place_containers() to define the placement strategy.
    """

    # How many jobs ahead of the queue head may be considered for scheduling.
    jobs_to_peek_arg = 7
    # When True, jobs_to_peek grows per round according to random_arrival_rate.
    activate_random_arrival = False
    # NOTE(review): declared but never read in this class — confirm whether
    # subclasses rely on it.
    waiting_limit = -1

    def __init__(self, estimation: ComplementarityEstimation, cluster: Cluster, update_interval=60):
        # FIFO queue of Applications waiting to be scheduled.
        self.queue = []
        self.estimation = estimation
        self.cluster = cluster
        # Periodically refreshes the estimation from live cluster usage.
        self._timer = RepeatedTimer(update_interval, self.update_estimation)
        # Serializes _on_app_finished callbacks against each other.
        self.scheduler_lock = Lock()
        self.started_at = None
        self.stopped_at = None
        self.print_estimation = False
        # Histogram: rounds waited -> number of applications.
        self.waiting_time = {}
        # Counts completed scheduling rounds.
        self.scheduled_apps_num = 0
        self.jobs_to_peek = self.jobs_to_peek_arg
        # Per-round increments applied to jobs_to_peek when
        # activate_random_arrival is on (indexed by scheduled_apps_num).
        self.random_arrival_rate = [0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 1, 0, 2, 0, 2,
                                    1, 0, 2, 2, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0]

    def start(self):
        """Kick off scheduling and the periodic estimation updates."""
        self.schedule()
        self._timer.start()
        # NOTE(review): the 3600s offset presumably compensates for a timezone
        # skew in the Influx time window built in export_experiment_data —
        # confirm.
        self.started_at = time.time() - 3600

    def stop(self):
        """Cancel the estimation timer and record the stop timestamp."""
        self._timer.cancel()
        self.stopped_at = time.time() - 3600

    def update_estimation(self):
        """Update the complementarity estimation from current cluster usage.

        For every group of co-located apps that is actively using resources,
        performs leave-one-out updates: each app is scored against the rest
        at the observed usage rate.
        """
        for (apps, usage) in self.cluster.apps_usage():
            if len(apps) > 0 and usage.is_not_idle():
                for rest, out in LeaveOneOut(len(apps)):
                    self.estimation.update_app(apps[out[0]], [apps[i] for i in rest], usage.rate())
        if self.print_estimation:
            self.estimation.print()

    def add(self, app: Application):
        """Append a single application to the scheduling queue."""
        self.queue.append(app)

    def add_all(self, apps: List[Application]):
        """Append several applications to the scheduling queue."""
        self.queue.extend(apps)

    def schedule(self):
        """Start queued applications until none can be scheduled.

        Maintains the waiting-time histogram and, when activate_random_arrival
        is set, grows jobs_to_peek per round to simulate job arrivals.
        """
        while len(self.queue) > 0:
            try:
                app = self.schedule_application()
                # Record how many rounds this app waited in the queue.
                # NOTE(review): the decrement presumably excludes the current
                # round from the count — confirm.
                if app.waiting_time != 0:
                    app.waiting_time = app.waiting_time - 1
                if app.waiting_time in self.waiting_time.keys():
                    self.waiting_time[app.waiting_time] = self.waiting_time[app.waiting_time] + 1
                else:
                    self.waiting_time[app.waiting_time] = 1
            except NoApplicationCanBeScheduled:
                print("No Application can be scheduled right now")
                break
            app.start(self.cluster.resource_manager, self._on_app_finished)
            if self.jobs_to_peek < len(self.queue) and self.activate_random_arrival:
                print("Update random arrival rate")
                # NOTE(review): indexes random_arrival_rate by round number;
                # raises IndexError past round 30 — confirm intended limit.
                self.jobs_to_peek = self.jobs_to_peek + self.random_arrival_rate[self.scheduled_apps_num]
            print("Scheduler round: {}".format(self.scheduled_apps_num))
            print("Jobs_to_peek = {}".format(self.jobs_to_peek))
            self.scheduled_apps_num = self.scheduled_apps_num + 1
            time.sleep(1) # add a slight delay so jobs could be submitted to yarn in order
        self.cluster.print_nodes()

    def schedule_application(self) -> Application:
        """Pop the next application from the queue and place its containers.

        Raises:
            NoApplicationCanBeScheduled: when the cluster has no free
                containers or the chosen app does not fit.
        """
        if self.cluster.available_containers()==0:
            raise NoApplicationCanBeScheduled
        app = self.get_application_to_schedule()
        if app.n_containers > self.cluster.available_containers():
            # Put it back at the head of the queue and give up for now.
            self.queue = [app] + self.queue
            raise NoApplicationCanBeScheduled

        self.place_containers(app)

        return app

    def _on_app_finished(self, app: Application):
        """Callback fired when an application completes.

        Frees the app's resources, then either schedules more work or — when
        both the queue and the cluster are empty — stops and reports.
        """
        self.scheduler_lock.acquire()
        self.cluster.remove_applications(app)
        if len(self.queue) == 0 and self.cluster.has_application_scheduled() == 0:
            self.stop()
            self.on_stop()
        else:
            self.schedule()
        self.scheduler_lock.release()

    def on_stop(self):
        """Report total runtime and waiting times, then export experiment data."""
        delta = self.stopped_at - self.started_at
        print("Queue took {:.0f}'{:.0f} to complete".format(delta // 60, delta % 60))
        self.estimation.save(self.estimation.output_folder)
        self.export_experiment_data()
        print("\n\n\n((((((((((  Waiting times  ))))))))))")
        for (key, value) in self.waiting_time.items():
            print("{} rounds waiting - {}".format(key,value))
        print(str(self.waiting_time))

    def export_experiment_data(self):
        """Export CPU/memory/disk/network metrics for the experiment window.

        Builds influx CLI commands (per-host series plus 10s means) over the
        [started_at, stopped_at] interval, runs them in a single shell
        pipeline, and appends the command text to cmd.txt under
        /data/vinh.tran/new/expData/<experiment_name>/.
        """
        print("\n\n\n=======Generate experiment output=======\n\n\n")
        # Regex alternation of all node addresses for the influx WHERE clause.
        host_list = "|".join([address for address in self.cluster.nodes.keys()])

        # Per-host CPU usage (user + iowait).
        cmd_query_cpu = "\ninflux -precision rfc3339 -username root -password root" \
                        " -database 'telegraf' -host 'localhost' -execute 'SELECT usage_user,usage_iowait " \
                        "FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' AND host =~ /{}/  " \
                        "AND cpu = '\\''cpu-total'\\'' GROUP BY host' -format 'csv' > /data/vinh.tran/new/expData/{}/cpu_{}.csv" \
            .format(time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    host_list,
                    Application.experiment_name,
                    Application.experiment_name)
        print(cmd_query_cpu)
        # subprocess.Popen(cmd_query_cpu, shell=True)

        # Cluster-wide mean CPU usage in 10-second buckets.
        cmd_query_cpu_mean = "\ninflux -precision rfc3339 -username root -password root" \
                             " -database 'telegraf' -host 'localhost' -execute 'SELECT mean(usage_user) as \"mean_cpu_percent\",mean(usage_iowait) as \"mean_io_wait\" " \
                             "FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' AND host =~ /{}/  " \
                             "AND cpu = '\\''cpu-total'\\'' GROUP BY time(10s)' -format 'csv' > /data/vinh.tran/new/expData/{}/cpu_{}_mean.csv" \
            .format(time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    host_list,
                    Application.experiment_name,
                    Application.experiment_name)
        print(cmd_query_cpu_mean)

        # Per-host memory usage.
        cmd_query_mem = "\ninflux -precision rfc3339 -username root -password root " \
                        "-database 'telegraf' -host 'localhost' -execute 'SELECT used_percent " \
                        "FROM \"telegraf\".\"autogen\".\"mem\" WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' AND host =~ /{}/  " \
                        "GROUP BY host' -format 'csv' > /data/vinh.tran/new/expData/{}/mem_{}.csv" \
            .format(time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    host_list,
                    Application.experiment_name,
                    Application.experiment_name)
        print(cmd_query_mem)

        # Cluster-wide mean memory usage in 10-second buckets.
        cmd_query_mem_mean = "\ninflux -precision rfc3339 -username root -password root " \
                             "-database 'telegraf' -host 'localhost' -execute 'SELECT mean(used_percent) " \
                             "FROM \"telegraf\".\"autogen\".\"mem\" WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' AND host =~ /{}/  " \
                             "GROUP BY time(10s)' -format 'csv' > /data/vinh.tran/new/expData/{}/mem_{}_mean.csv" \
            .format(time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    host_list,
                    Application.experiment_name,
                    Application.experiment_name)
        print(cmd_query_mem_mean)

        # Per-host disk throughput (derivatives of read/write byte counters).
        cmd_query_disk = "\ninflux -precision rfc3339 -username root -password root " \
                         "-database 'telegraf' -host 'localhost' -execute 'SELECT sum(read_bytes),sum(write_bytes) " \
                         "FROM (SELECT derivative(last(\"read_bytes\"),1s) as \"read_bytes\",derivative(last(\"write_bytes\"),1s) as \"write_bytes\",derivative(last(\"io_time\"),1s) as \"io_time\" " \
                         "FROM \"telegraf\".\"autogen\".\"diskio\" WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' AND host =~ /{}/  " \
                         "GROUP BY \"host\",\"name\",time(10s)) WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' GROUP BY host,time(10s)' -format 'csv' > /data/vinh.tran/new/expData/{}/disk_{}.csv" \
            .format(time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    host_list,
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    Application.experiment_name,
                    Application.experiment_name)
        print(cmd_query_disk)

        # Cluster-wide mean disk throughput in 10-second buckets.
        cmd_query_disk_mean = "\ninflux -precision rfc3339 -username root -password root " \
                              "-database 'telegraf' -host 'localhost' -execute 'SELECT sum(read_bytes),sum(write_bytes) " \
                              "FROM (SELECT derivative(last(\"read_bytes\"),1s) as \"read_bytes\",derivative(last(\"write_bytes\"),1s) as \"write_bytes\",derivative(last(\"io_time\"),1s) as \"io_time\" " \
                              "FROM \"telegraf\".\"autogen\".\"diskio\" WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' AND host =~ /{}/  " \
                              "GROUP BY \"host\",\"name\",time(10s)) WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' GROUP BY time(10s)' -format 'csv' > /data/vinh.tran/new/expData/{}/disk_{}_mean.csv" \
            .format(time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    host_list,
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    Application.experiment_name,
                    Application.experiment_name)
        print(cmd_query_disk_mean)

        # Per-host network throughput (derivatives of recv/sent byte counters).
        cmd_query_net = "\ninflux -precision rfc3339 -username root -password root " \
                        "-database 'telegraf' -host 'localhost' -execute 'SELECT sum(download_bytes),sum(upload_bytes) FROM (SELECT  derivative(first(\"bytes_recv\"),1s) " \
                        "as \"download_bytes\",derivative(first(\"bytes_sent\"),1s) as \"upload_bytes\"" \
                        "FROM \"telegraf\".\"autogen\".\"net\" WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' AND host =~ /{}/  " \
                        "GROUP BY \"host\",time(10s)) WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' GROUP BY host,time(10s)' -format 'csv' > /data/vinh.tran/new/expData/{}/net_{}.csv" \
            .format(time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    host_list,
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    Application.experiment_name,
                    Application.experiment_name)
        print(cmd_query_net)

        # Cluster-wide mean network throughput in 10-second buckets.
        cmd_query_net_mean = "\ninflux -precision rfc3339 -username root -password root " \
                             "-database 'telegraf' -host 'localhost' -execute 'SELECT sum(download_bytes),sum(upload_bytes) FROM (SELECT  derivative(first(\"bytes_recv\"),1s) " \
                             "as \"download_bytes\",derivative(first(\"bytes_sent\"),1s) as \"upload_bytes\"" \
                             "FROM \"telegraf\".\"autogen\".\"net\" WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' AND host =~ /{}/  " \
                             "GROUP BY \"host\",time(10s)) WHERE time > '\\''{}'\\'' and time < '\\''{}'\\'' GROUP BY time(10s)' -format 'csv' > /data/vinh.tran/new/expData/{}/net_{}_mean.csv" \
            .format(time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    host_list,
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.started_at)),
                    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(self.stopped_at)),
                    Application.experiment_name,
                    Application.experiment_name)
        print(cmd_query_net_mean)

        # Run all eight exports in a single best-effort shell pipeline.
        subprocess.Popen(
            cmd_query_cpu + " && " + cmd_query_mem + " && " + cmd_query_disk + " && " + cmd_query_net + " && "
            + cmd_query_cpu_mean + " && " + cmd_query_mem_mean + " && " + cmd_query_disk_mean + " && " + cmd_query_net_mean,
            shell=True)

        time.sleep(1)

        # Keep a log of the exact commands used for this experiment run.
        with open("/data/vinh.tran/new/expData/{}/cmd.txt".format(Application.experiment_name), 'a') as file:
            file.write("{}\n\n{}\n\n{}\n\n{}\n\n\n\n{}\n\n{}\n\n{}\n\n{}\n".
                       format(cmd_query_cpu, cmd_query_mem, cmd_query_disk, cmd_query_net,
                              cmd_query_cpu_mean, cmd_query_mem_mean, cmd_query_disk_mean, cmd_query_net_mean))

    def get_application_to_schedule(self) -> Application:
        """Return the queue head, popping it only when it fits the cluster.

        Raises:
            NoApplicationCanBeScheduled: when the head needs more containers
                than are currently available.
        """
        app = self.queue[0]
        if app.n_containers > self.cluster.available_containers():
            raise NoApplicationCanBeScheduled
        return self.queue.pop(0)

    @abstractmethod
    def place_containers(self, app: Application):
        """Place the app's containers on cluster nodes (strategy-specific)."""
        pass

    def _place_random(self, app: Application, n_containers=4):
        """Place up to n_containers of `app` on one random node, preferring
        non-full nodes that are not already running `app`."""
        nodes = self.cluster.non_full_nodes()
        good_nodes = [
            n for n in nodes
            if len(n.applications()) == 0 or n.applications()[0] != app
        ]
        if len(good_nodes) == 0:
            good_nodes = nodes
        node = good_nodes[np.random.randint(0, len(good_nodes))]
        return self._place(app, node, n_containers)

    @staticmethod
    def _place(app: Application, node: Node, n_containers=4):
        """Attach the next n_containers of `app` (capped at app.n_containers)
        to `node`; returns the number of container indices visited."""
        if n_containers <= 0:
            raise ValueError("Can not place {} containers".format(n_containers))
        # print("Place {} on {} ({})".format(app, node, node.available_containers()))

        # Count containers already placed: tasks bound to a node, plus one
        # when the application object itself is bound to a node.
        n = len([t for t in app.tasks if t.node is not None])
        n += 1 if app.node is not None else 0

        for k in range(n, n + n_containers):
            if k < app.n_containers:
                node.add_container(app.containers[k])
                print("Place a task of {} on node {}".format(app, node))

        return k - n + 1
# Exemplo n.º 3
# 0
class Scheduler(metaclass=ABCMeta):
    """Abstract FIFO scheduler that places application containers on a cluster.

    A RepeatedTimer periodically folds observed cluster usage into the
    complementarity estimation; subclasses supply the placement strategy
    via place_containers().
    """

    def __init__(self,
                 estimation: ComplementarityEstimation,
                 cluster: Cluster,
                 update_interval=60):
        # FIFO queue of applications waiting to run.
        self.queue = []
        self.estimation = estimation
        self.cluster = cluster
        # Periodically refreshes the estimation from live usage.
        self._timer = RepeatedTimer(update_interval, self.update_estimation)
        # Serializes completion callbacks against each other.
        self.scheduler_lock = Lock()
        self.started_at = None
        self.stopped_at = None
        self.print_estimation = False

    def start(self):
        """Schedule whatever fits right now and start the estimation timer."""
        self.schedule()
        self._timer.start()
        self.started_at = time.time()

    def stop(self):
        """Cancel the estimation timer and record the stop timestamp."""
        self._timer.cancel()
        self.stopped_at = time.time()

    def update_estimation(self):
        """Fold current co-located app usage into the estimation.

        Uses leave-one-out over each active group of apps: every app is
        scored against the remaining ones at the observed usage rate.
        """
        for (apps, usage) in self.cluster.apps_usage():
            if len(apps) == 0 or not usage.is_not_idle():
                continue
            for rest, out in LeaveOneOut(len(apps)):
                others = [apps[i] for i in rest]
                self.estimation.update_app(apps[out[0]], others, usage.rate())
        if self.print_estimation:
            self.estimation.print()

    def add(self, app: Application):
        """Enqueue a single application."""
        self.queue.append(app)

    def add_all(self, apps: List[Application]):
        """Enqueue several applications at once."""
        self.queue.extend(apps)

    def schedule(self):
        """Start queued applications until none can be scheduled."""
        while self.queue:
            try:
                candidate = self.schedule_application()
            except NoApplicationCanBeScheduled:
                print("No Application can be scheduled right now")
                break
            candidate.start(self.cluster.resource_manager, self._on_app_finished)
        self.cluster.print_nodes()

    def schedule_application(self) -> Application:
        """Pop the next application and place its containers.

        Raises:
            NoApplicationCanBeScheduled: when the app needs more containers
                than the cluster currently has available.
        """
        app = self.get_application_to_schedule()
        if app.n_containers > self.cluster.available_containers():
            # Put it back at the head of the queue and give up for now.
            self.queue = [app] + self.queue
            raise NoApplicationCanBeScheduled

        self.place_containers(app)
        return app

    def _on_app_finished(self, app: Application):
        """Completion callback: free resources, then reschedule or stop."""
        self.scheduler_lock.acquire()
        self.cluster.remove_applications(app)
        all_done = (len(self.queue) == 0
                    and self.cluster.has_application_scheduled() == 0)
        if all_done:
            self.stop()
            self.on_stop()
        else:
            self.schedule()
        self.scheduler_lock.release()

    def on_stop(self):
        """Report the total queue runtime and persist the estimation."""
        delta = self.stopped_at - self.started_at
        minutes = delta // 60
        seconds = delta % 60
        print("Queue took {:.0f}'{:.0f} to complete".format(minutes, seconds))
        self.estimation.save('estimation')

    def get_application_to_schedule(self) -> Application:
        """Return the queue head, popping it only when it fits the cluster.

        Raises:
            NoApplicationCanBeScheduled: when the head does not fit.
        """
        head = self.queue[0]
        if head.n_containers > self.cluster.available_containers():
            raise NoApplicationCanBeScheduled
        return self.queue.pop(0)

    @abstractmethod
    def place_containers(self, app: Application):
        """Place the app's containers on cluster nodes (strategy-specific)."""
        pass

    def _place_random(self, app: Application, n_containers=3):
        """Place up to n_containers of `app` on one random node, preferring
        non-full nodes that are not already running `app`."""
        candidates = self.cluster.non_full_nodes()
        preferred = [
            node for node in candidates
            if len(node.applications()) == 0 or node.applications()[0] != app
        ]
        pool = preferred if len(preferred) > 0 else candidates
        chosen = pool[np.random.randint(0, len(pool))]
        return self._place(app, chosen, n_containers)

    @staticmethod
    def _place(app: Application, node: Node, n_containers=3):
        """Attach the next n_containers of `app` (capped at app.n_containers)
        to `node`; returns the number of container indices visited."""
        if n_containers <= 0:
            raise ValueError(
                "Can not place {} containers".format(n_containers))
        # print("Place {} on {} ({})".format(app, node, node.available_containers()))

        # Count containers already placed: tasks bound to a node, plus one
        # when the application object itself is bound to a node.
        placed = len([t for t in app.tasks if t.node is not None])
        if app.node is not None:
            placed += 1

        for k in range(placed, placed + n_containers):
            if k < app.n_containers:
                node.add_container(app.containers[k])

        return k - placed + 1
class RPiServoblasterController:
	"""Drives the arm servos through ServoBlaster (servod).

	Positions set via apply() are cached and flushed to the servos
	periodically by a background timer.
	"""

	# How often cached positions are flushed to ServoBlaster.
	COMMIT_INTERVAL_SEC = 0.1

	# Pin numbers as used when running ./servod
	# (neither GPIO numbers nor physical pin positions).
	WAIST_SERVO_PIN = 7
	BOOM_SERVO_PIN = 6
	ARM_SERVO_PIN = 5
	CRAW_SERVO_PIN = 4

	# Part index -> servod pin number.
	PIN_MAP = {
		0: WAIST_SERVO_PIN,
		1: BOOM_SERVO_PIN,
		2: ARM_SERVO_PIN,
		3: CRAW_SERVO_PIN,
	}

	# Part index -> human-readable name (see getPartName()).
	PART_NAMES = ('WAIST', 'BOOM', 'ARM', 'CRAW')

	def __init__(self):
		# Start ServoBlaster (must run as root).
		# --max=200 tunes the midpoint reached at a 50% command.
		os.system('sudo /home/pi/PiBits/ServoBlaster/user/servod --idle-timeout=2000 --max=200')

		# Build one servo per part. Iterate indexes in sorted order so that
		# self.servos[index] always lines up with PIN_MAP — the original
		# dict.iterkeys() was Python 2 only and relied on dict iteration order.
		self.servos = []
		for index in sorted(RPiServoblasterController.PIN_MAP):
			pin = RPiServoblasterController.PIN_MAP[index]
			self.servos.append(SG90Servoblaster(pin, self.getPartName(index)))

		# Positions are cached here and flushed periodically by commit().
		self.positions = []
		self.timer = RepeatedTimer(RPiServoblasterController.COMMIT_INTERVAL_SEC, self.commit)
		self.timer.start()

	def apply(self, positions):
		"""Record the desired positions; commit() pushes them asynchronously
		instead of calling ServoBlaster synchronously."""
		self.positions = positions

	def shutdown(self):
		"""Stop the periodic commit timer."""
		self.timer.cancel()

	def getIndexes(self):
		"""Return the controllable part indexes."""
		return list(RPiServoblasterController.PIN_MAP.keys())

	def switchDebugPosition(self):
		"""Toggle position debugging on every servo."""
		for index in sorted(RPiServoblasterController.PIN_MAP):
			self.servos[index].switchDebugPosition()

	def commit(self):
		"""Flush the cached positions to the servos.

		Runs on the timer thread; does nothing until apply() has been called.
		"""
		if 0 == len(self.positions):
			return
		for index in sorted(RPiServoblasterController.PIN_MAP):
			self.servos[index].rotateTo(self.positions[index])

	def getPartName(self, index):
		"""Return the part name for `index`, or 'unknown' for anything else."""
		if index in RPiServoblasterController.PIN_MAP:
			return RPiServoblasterController.PART_NAMES[index]
		return 'unknown'
class RPiPCA9685Controller:
	"""Drives the arm servos through a PCA9685 PWM board over I2C.

	Positions set via apply() are cached and flushed to the servos
	periodically by a background timer.
	"""

	# How often cached positions are flushed to the servos.
	COMMIT_INTERVAL_SEC = 0.1

	# PCA9685 channel numbers for each part.
	WAIST_SERVO_PIN = 0
	BOOM_SERVO_PIN = 1
	ARM_SERVO_PIN = 2
	CRAW_SERVO_PIN = 3

	# Part index -> PWM channel.
	PIN_MAP = {
		0: WAIST_SERVO_PIN,
		1: BOOM_SERVO_PIN,
		2: ARM_SERVO_PIN,
		3: CRAW_SERVO_PIN,
	}

	# Part index -> human-readable name (see getPartName()).
	PART_NAMES = ('WAIST', 'BOOM', 'ARM', 'CRAW')

	def __init__(self, address=0x40):
		# I2C control library:
		# https://github.com/adafruit/Adafruit-Raspberry-Pi-Python-Code.git
		self.pwm = Adafruit_PCA9685.PCA9685(address, busnum=1)

		self.pwm.set_pwm_freq(SG90PCA9685.PWM_FREQ)

		# Build one servo per part. Iterate indexes in sorted order so that
		# self.servos[index] always lines up with PIN_MAP — the original
		# dict.iterkeys() was Python 2 only and relied on dict iteration order.
		self.servos = []
		for index in sorted(RPiPCA9685Controller.PIN_MAP):
			pin = RPiPCA9685Controller.PIN_MAP[index]
			self.servos.append(SG90PCA9685(pin, self.getPartName(index), self.pwm))

		# Positions are cached here and flushed periodically by commit().
		self.positions = []
		self.timer = RepeatedTimer(RPiPCA9685Controller.COMMIT_INTERVAL_SEC, self.commit)
		self.timer.start()

	def apply(self, positions):
		"""Record the desired positions; commit() pushes them asynchronously
		instead of writing to the PWM board synchronously."""
		self.positions = positions

	def shutdown(self):
		"""Stop the periodic commit timer."""
		self.timer.cancel()

	def getIndexes(self):
		"""Return the controllable part indexes."""
		return list(RPiPCA9685Controller.PIN_MAP.keys())

	def switchDebugPosition(self):
		"""Toggle position debugging on every servo."""
		for index in sorted(RPiPCA9685Controller.PIN_MAP):
			self.servos[index].switchDebugPosition()

	def commit(self):
		"""Flush the cached positions to the servos.

		Runs on the timer thread; does nothing until apply() has been called.
		"""
		if 0 == len(self.positions):
			return
		for index in sorted(RPiPCA9685Controller.PIN_MAP):
			degree = self.positions[index]
			self.servos[index].rotateTo(degree)

	def getPartName(self, index):
		"""Return the part name for `index`, or 'unknown' for anything else."""
		if index in RPiPCA9685Controller.PIN_MAP:
			return RPiPCA9685Controller.PART_NAMES[index]
		return 'unknown'