Пример #1
0
 def init_test(self, testresult):
     '''Test case initialization.

     :param testresult: test result object, forwarded to the base class
     '''
     super(iTestCase, self).init_test(testresult)
     # Ensure the attachment directory exists before any logs/screenshots
     # get written into it.
     if not os.path.exists(self.attachments_path):
         os.makedirs(self.attachments_path)
     self._device_manager = DeviceManager()
Пример #2
0
def benchmark_model(flags):
    """Build the benchmark binary for every configured ABI and run it on
    each device that supports that ABI.
    """
    configs = format_model_config(flags)
    clear_build_dirs(configs[YAMLKeyword.library_name])

    device_list = DeviceManager.list_devices(flags.device_yml)
    target_socs = configs[YAMLKeyword.target_socs]
    # Restrict to the requested SoCs unless "all" was asked for.
    if target_socs and ALL_SOC_TAG not in target_socs:
        device_list = [d for d in device_list
                       if d[YAMLKeyword.target_socs].lower() in target_socs]

    for target_abi in configs[YAMLKeyword.target_abis]:
        for dev in device_list:
            if target_abi not in dev[YAMLKeyword.target_abis]:
                six.print_('There is no abi %s with soc %s' %
                           (target_abi, dev[YAMLKeyword.target_socs]),
                           file=sys.stderr)
                continue
            # Build the benchmark_model binary for this ABI, then run it
            # on the device while holding its lock.
            toolchain = infer_toolchain(target_abi)
            build_benchmark_model(configs, target_abi, toolchain,
                                  not flags.disable_openmp,
                                  flags.mace_lib_type)
            wrapper = DeviceWrapper(dev)
            with wrapper.lock():
                wrapper.bm_specific_target(flags, configs, target_abi)
Пример #3
0
def benchmark_model(flags):
    """Build the benchmark binary per configured ABI and run it on the
    selected devices, reporting elapsed wall-clock time per device.
    """
    configs = format_model_config(flags)
    clear_build_dirs(configs[YAMLKeyword.library_name])

    device_list = DeviceManager.list_devices(flags.device_yml)
    target_socs = configs[YAMLKeyword.target_socs]
    if target_socs and TargetSOCTag.all not in target_socs:
        device_list = [d for d in device_list
                       if d[YAMLKeyword.target_socs].lower() in target_socs]

    for target_abi in configs[YAMLKeyword.target_abis]:
        # "random" mode benchmarks a single arbitrarily chosen device.
        if flags.target_socs == TargetSOCTag.random:
            target_devices = sh_commands.choose_a_random_device(
                device_list, target_abi)
        else:
            target_devices = device_list
        for dev in target_devices:
            if target_abi not in dev[YAMLKeyword.target_abis]:
                six.print_('There is no abi %s with soc %s' %
                           (target_abi, dev[YAMLKeyword.target_socs]),
                           file=sys.stderr)
                continue
            # Build the benchmark binary, then run it under the device lock.
            toolchain = infer_toolchain(target_abi)
            build_benchmark_model(configs, target_abi, toolchain,
                                  not flags.disable_openmp,
                                  flags.mace_lib_type)
            wrapper = DeviceWrapper(dev)
            started = time.time()
            with wrapper.lock():
                wrapper.bm_specific_target(flags, configs, target_abi)
            print("Elapse time: %f minutes." % ((time.time() - started) / 60))
Пример #4
0
	def __init__(self):
		"""Load settings and connect to the playback device.

		Every option from every section of settings_dev.cfg is copied onto
		the instance as an attribute (e.g. self.device_hostname). Exits the
		process with status 1 if the configured music path does not exist.
		"""
		self.bytes_free = 0
		self.fail = 0

		config = ConfigParser()
		config.read(os.path.join(os.path.dirname(__file__), './config/settings_dev.cfg'))

		for s in config.sections():
			# BUG FIX: the old `dict(self.__dict__.items() + {...}.items())`
			# only works on Python 2 (dict_items has no `+` on Python 3).
			# update() has the same override semantics on both versions.
			self.__dict__.update({option: value for option, value in config.items(s)})

		self.device = DeviceManager(hostname=self.device_hostname, username=self.ssh_username)
		self.music_path = os.path.join(self.music_base_path, self.music_folder)

		if not os.path.exists(self.music_path):
			logger.error("Music path %s does not exist." %(self.music_path))
			exit(1)
Пример #5
0
def main(unused_args):
    """Build FLAGS.target for each requested ABI, optionally run it on the
    selected devices, and report run statistics to Dana when available.
    """
    target = FLAGS.target
    host_bin_path, bin_name = sh_commands.bazel_target_to_bin(target)
    dana_util = DanaUtil()

    for target_abi in FLAGS.target_abis.split(','):
        sh_commands.bazel_build(target,
                                abi=target_abi,
                                toolchain=infer_toolchain(target_abi),
                                enable_neon=FLAGS.enable_neon,
                                enable_quantize=FLAGS.enable_quantize,
                                enable_bfloat16=FLAGS.enable_bfloat16,
                                enable_fp16=FLAGS.enable_fp16,
                                enable_rpcmem=FLAGS.enable_rpcmem,
                                enable_hta=FLAGS.enable_hta,
                                address_sanitizer=FLAGS.address_sanitizer,
                                debug_mode=FLAGS.debug_mode)
        if not FLAGS.run_target:
            continue

        target_devices = DeviceManager.list_devices(FLAGS.device_yml)
        # Explicit SoC list: keep only matching devices.
        if FLAGS.target_socs not in (TargetSOCTag.all, TargetSOCTag.random):
            wanted = set(FLAGS.target_socs.split(','))
            target_devices = [d for d in target_devices
                              if d[YAMLKeyword.target_socs] in wanted]
        if FLAGS.target_socs == TargetSOCTag.random:
            target_devices = sh_commands.choose_a_random_device(
                target_devices, target_abi)

        for dev in target_devices:
            if target_abi not in dev[YAMLKeyword.target_abis]:
                print("Skip device %s which does not support ABI %s" %
                      (dev, target_abi))
                continue
            stdouts = DeviceWrapper(dev).run(
                target_abi,
                host_bin_path,
                bin_name,
                args=FLAGS.args,
                opencl_profiling=True,
                vlog_level=FLAGS.vlog_level,
                out_of_range_check=True,
                address_sanitizer=FLAGS.address_sanitizer,
                simpleperf=FLAGS.simpleperf)
            # Post-process stdout with the configured module-level handler.
            globals()[FLAGS.stdout_processor](stdouts, dev, target_abi)
            if dana_util.service_available():
                report_run_statistics(stdouts=stdouts,
                                      device=dev['device_name'],
                                      soc=dev['target_socs'],
                                      abi=target_abi,
                                      dana_util=dana_util)
Пример #6
0
def run_mace(flags):
    """Build mace_run (or the example binary) for each configured ABI, run
    it on every matching device, then package the output files.
    """
    configs = format_model_config(flags)
    clear_build_dirs(configs[YAMLKeyword.library_name])

    device_list = DeviceManager.list_devices(flags.device_yml)
    target_socs = configs[YAMLKeyword.target_socs]
    if target_socs and TargetSOCTag.all not in target_socs:
        device_list = [d for d in device_list
                       if d[YAMLKeyword.target_socs].lower() in target_socs]

    for target_abi in configs[YAMLKeyword.target_abis]:
        # "random" mode runs on a single arbitrarily chosen device.
        if flags.target_socs == TargetSOCTag.random:
            target_devices = sh_commands.choose_a_random_device(
                device_list, target_abi)
        else:
            target_devices = device_list
        for dev in target_devices:
            if target_abi not in dev[YAMLKeyword.target_abis]:
                # Host "devices" silently skip unsupported ABIs.
                if dev[YAMLKeyword.device_name] != SystemType.host:
                    six.print_(
                        'The device with soc %s do not support abi %s' %
                        (dev[YAMLKeyword.target_socs], target_abi),
                        file=sys.stderr)
                continue
            toolchain = infer_toolchain(target_abi)
            device = DeviceWrapper(dev)
            if flags.example:
                build_example(configs,
                              target_abi,
                              toolchain,
                              not flags.disable_openmp,
                              flags.mace_lib_type,
                              flags.cl_binary_to_code,
                              device)
            else:
                build_mace_run(configs,
                               target_abi,
                               toolchain,
                               not flags.disable_openmp,
                               flags.address_sanitizer,
                               flags.mace_lib_type)
            started = time.time()
            with device.lock():
                device.run_specify_abi(flags, configs, target_abi)
            print("Elapse time: %f minutes." % ((time.time() - started) / 60))

    # Bundle everything produced above into a single package.
    package_path = sh_commands.packaging_lib(BUILD_OUTPUT_DIR,
                                             configs[YAMLKeyword.library_name])
    print_package_summary(package_path)
Пример #7
0
def main(unused_args):
    """Build FLAGS.target for each requested ABI and optionally run it on
    the selected devices.
    """
    target = FLAGS.target
    host_bin_path, bin_name = sh_commands.bazel_target_to_bin(target)

    for target_abi in FLAGS.target_abis.split(','):
        sh_commands.bazel_build(target, abi=target_abi,
                                toolchain=infer_toolchain(target_abi),
                                enable_neon=FLAGS.enable_neon,
                                address_sanitizer=FLAGS.address_sanitizer)
        if not FLAGS.run_target:
            continue

        target_devices = DeviceManager.list_devices(FLAGS.device_yml)
        if FLAGS.target_socs != "all" and FLAGS.target_socs != "random":
            wanted = set(FLAGS.target_socs.split(','))
            target_devices = [d for d in target_devices
                              if d[YAMLKeyword.target_socs] in wanted]
        if FLAGS.target_socs == "random":
            # Prefer an unlocked device; fall back to any device at random.
            unlocked = [d for d in target_devices
                        if not sh_commands.is_device_locked(d)]
            pool = unlocked if unlocked else target_devices
            target_devices = [random.choice(pool)]

        for dev in target_devices:
            if target_abi not in dev[YAMLKeyword.target_abis]:
                print("Skip device %s which does not support ABI %s" %
                      (dev, target_abi))
                continue
            stdouts = DeviceWrapper(dev).run(
                target_abi,
                host_bin_path,
                bin_name,
                args=FLAGS.args,
                opencl_profiling=True,
                vlog_level=0,
                out_of_range_check=True,
                address_sanitizer=FLAGS.address_sanitizer,
                simpleperf=FLAGS.simpleperf)
            # Post-process stdout with the configured module-level handler.
            globals()[FLAGS.stdout_processor](stdouts, dev, target_abi)
Пример #8
0
def run_mace(flags):
    """Build the run/example binary per configured ABI, execute it on all
    matching devices, and package the build outputs.
    """
    configs = format_model_config(flags)
    clear_build_dirs(configs[YAMLKeyword.library_name])

    device_list = DeviceManager.list_devices(flags.device_yml)
    target_socs = configs[YAMLKeyword.target_socs]
    if target_socs and ALL_SOC_TAG not in target_socs:
        device_list = [d for d in device_list
                       if d[YAMLKeyword.target_socs].lower() in target_socs]

    for target_abi in configs[YAMLKeyword.target_abis]:
        for dev in device_list:
            if target_abi not in dev[YAMLKeyword.target_abis]:
                # Host "devices" silently skip unsupported ABIs.
                if dev[YAMLKeyword.device_name] != SystemType.host:
                    six.print_(
                        'The device with soc %s do not support abi %s' %
                        (dev[YAMLKeyword.target_socs], target_abi),
                        file=sys.stderr)
                continue
            toolchain = infer_toolchain(target_abi)
            if flags.example:
                build_example(configs, target_abi, toolchain,
                              not flags.disable_openmp,
                              flags.mace_lib_type)
            else:
                build_mace_run(configs, target_abi, toolchain,
                               not flags.disable_openmp,
                               flags.address_sanitizer,
                               flags.mace_lib_type)
            # Run the freshly built binary under the device lock.
            wrapper = DeviceWrapper(dev)
            with wrapper.lock():
                wrapper.run_specify_abi(flags, configs, target_abi)

    # Bundle the build outputs into a single distributable package.
    package_path = sh_commands.packaging_lib(BUILD_OUTPUT_DIR,
                                             configs[YAMLKeyword.library_name])
    print_package_summary(package_path)
Пример #9
0
class iTestCase(TestCase):
    '''Base class for QT4i test cases.
    '''

    # Set to True when a crash is detected during the run; subclasses use
    # this to decide whether to upload crash logs.
    crash_flag = False
    # Directory where logs and screenshots are collected on failure.
    attachments_path = '/tmp/_attachments'

    def init_test(self, testresult):
        '''Test case initialization.

        :param testresult: test result object, forwarded to the base class
        '''
        super(iTestCase, self).init_test(testresult)
        # Ensure the attachment directory exists before logs/screenshots
        # get written into it.
        if not os.path.exists(self.attachments_path):
            os.makedirs(self.attachments_path)
        self._device_manager = DeviceManager()

    initTest = init_test  # alias so both camelCase and snake_case names work

    def clean_test(self):
        '''Test case cleanup.
        '''
        Device.release_all()
        self._device_manager.release_drivers()
        DriverManager().shutdown()  # also covers the local-device run mode

    cleanTest = clean_test  # alias so both camelCase and snake_case names work

    def get_extra_fail_record(self):
        '''Collect extra log records and attachments when a failure occurs.
        
        :return: dict,dict - log records, attachments
        '''
        # (If the instruments screenshot fails, libimobiledevice is used
        # to take the screenshot instead.)
        attachments = {}
        for device in Device.Devices:
            log_path = os.path.join(
                self.attachments_path,
                "%s_%s.log" % (type(self).__name__, time.time()))
            with open(log_path, 'w') as fd:
                fd.write(urllib.unquote(device.driver.ins.get_log()))
                attachments['driver日志(%s)' % device.name] = log_path

            # Record whether a crash happened; concrete test base classes
            # upload the crash log (filtered by process name).
            self.crash_flag = device.driver.ins.get_crash_flag()

            image_path = os.path.join(
                self.attachments_path,
                "%s_%s.png" % (type(self).__name__, time.time()))
            if device.screenshot(image_path):
                attachments['设备截图(%s)' % device.name] = image_path

        return {}, attachments

    def get_crash_log(self, procname):
        '''Fetch the application's crash logs.
        
        :param procname: the app's process name, as shown in Xcode
        :type procname: str
        :return: string or None - path(s) to the crash log
        '''
        crash_logs = {}
        for device in Device.Devices:
            crash_log = device.get_crash_log(procname)
            if crash_log:
                crash_logs['crash日志(%s)' % device.name] = crash_log
        return crash_logs
Пример #10
0
class FreshBeats:
	"""Syncs a local music library with a remote playback device over SSH.

	Reads settings from config/settings_dev.cfg, mirrors the on-disk music
	tree into Artist/Album/Song DB records, plans album check-ins/outs, and
	copies files to/from the device with scp/ssh.
	"""

	# Mount point of the device's storage (populated from config).
	device_mount = None
	dev = None

	def __init__(self):
		"""Load settings and connect to the device; exit(1) if the
		configured music path does not exist."""
		self.bytes_free = 0
		self.fail = 0

		config = ConfigParser()
		config.read(os.path.join(os.path.dirname(__file__), './config/settings_dev.cfg'))

		# Copy every config option onto the instance as an attribute.
		# NOTE(review): dict_items '+' concatenation is Python 2 only —
		# this line raises TypeError on Python 3.
		for s in config.sections():
			self.__dict__ = dict(self.__dict__.items() + {i[0]: i[1] for i in config.items(s)}.items())

		self.device = DeviceManager(hostname=self.device_hostname, username=self.ssh_username)
		self.music_path = os.path.join(self.music_base_path, self.music_folder)

		if not os.path.exists(self.music_path):
			logger.error("Music path %s does not exist." %(self.music_path))
			exit(1)

	def report(self):
		"""Log which albums are currently on the device and which albums
		the current plan will add/remove/refresh."""
		logger.info("Report on device folder '%s'" % self.beats_target_folder)

		folders_on_device = self.device.get_music_folders_on_device(self.beats_target_folder)

		found_on_device = []
		#found_on_device_no_subfolder = []

		for folder_path in folders_on_device:

			logger.debug("Folder found: %s" % folder_path)

			# Device layout is assumed to be .../artist/album — the last
			# two path components identify the album.
			tup = folder_path.split('/')

			if len(tup) < 2:
				#found_on_device_no_subfolder.append(folder_path)
				continue

			artist = tup[-2]
			album = tup[-1]

			artist_matches = Artist.objects.filter(name=artist)

			if len(artist_matches) > 1:
				logger.debug("Found %s artists for '%s'" % (len(artist_matches), artist))

			for artist_match in artist_matches:
				album_match = Album.objects.filter(artist__name=artist_match.name, name=album).first()
				if album_match:
					found_on_device.append(album_match)
					break

		'''
		if len(found_on_device_no_subfolder) > 0:
			logger.warn("%s folders found without proper structure to perform lookup" % len(found_on_device_no_subfolder))
			logger.warn(found_on_device_no_subfolder)
		'''

		if len(found_on_device) == 0:
			logger.warn("No albums found on device")
		else:

			# Column widths for the aligned report output.
			max_album = max([ len(a.name.encode('utf-8')) for a in found_on_device ]) + 1
			max_artist = max([ len(a.artist.name.encode('utf-8')) for a in found_on_device ]) + 1

			logger.info("Albums on Device")

			for a in found_on_device:
				checked_out = a.current_albumcheckout() is not None 
				logger.info("{0:<{1}} {2:<{3}} {4:>32} {5:>10}".format(a.name.encode('utf-8'), max_album, a.artist.name.encode('utf-8'), max_artist, a.action, "checked-out" if checked_out else "-")) 

		action_albums = Album.objects.filter(~Q(action=Album.DONOTHING), action__isnull=False)
		max_album = max([ len(a.name.encode('utf-8')) for a in action_albums ]) + 1
		max_artist = max([ len(a.artist.name.encode('utf-8')) for a in action_albums ]) + 1
		add_size = sum([ a.total_size for a in action_albums.filter(Q(action=Album.ADD) | Q(action=Album.REFRESH)) ])
		remove_size = sum([ a.total_size for a in action_albums.filter(action=Album.REMOVE) ])

		logger.info("Albums in Plan")
		for a in action_albums:
			logger.info("{0:<{1}} / {2:<{3}}: {4:>32}".format(a.name.encode('utf-8'), max_album, a.artist.name.encode('utf-8'), max_artist, a.action))

		logger.info("ADDING {0} MB".format(add_size/(1024*1024)))
		logger.info("REMOVING {0} MB".format(remove_size/(1024*1024)))

	def update_db(self):
		"""Walk the local music tree and create/update Artist, Album and
		Song records to match what is on disk."""
		try:

			for root, dirs, files in os.walk(self.music_path):

				if all(f in self.skip_files for f in files):
					logger.debug("	%s: all files skipped" % root)
					continue

				# Expected layout: music_path/artist/album/track files.
				parts = root.split('/')

				album = parts[-1].strip()
				artist = parts[-2].strip()

				logger.debug("%s / %s" %(artist, album))

				if '/'.join(parts[0:-2]) != self.music_path and artist != self.music_folder and '/'.join(parts) != self.music_path:
					logger.warn("	Path too deep - skipping - : %s" %('/'.join(parts)))
					continue

				all_files = files
				music_files = [ f for f in files if f[f.rfind("."):] in self.music_file_extensions and f.find("._") < 0 ]
				#music_files = filter(lambda x: x[x.rfind("."):] in self.music_file_extensions and x.find("._") < 0, files)

				tracks = len(music_files)

				if tracks == 0:
					logger.debug("	No tracks - skipping")
					continue

				total_size = sum(getsize(join(root, name)) for name in all_files)
				audio_size = sum(getsize(join(root, name)) for name in music_files)

				# -- files in the root of the artist folder, i.e. not in an album folder
				if artist == self.music_folder:
					logger.debug("	Only one level deep - no album")
					artist = album
					album = 'no album'

				artist_match = None

				try:
					artist_match = Artist.objects.get(name=artist)
				except Artist.DoesNotExist as d:
					pass

				if artist_match is None:

					logger.debug("	New Artist: %s" % artist)

					artist_match = Artist(name=artist)
					artist_match.save()

				possible_album_match = None
				updated_existing_album = False

				try:

					possible_album_match = Album.objects.get(artist=artist_match, name=album)

				except Album.DoesNotExist as d:

					pass # - it's OK

				if possible_album_match is None:

					logger.debug("	New Album: %s" % album)

					a = Album(artist=artist_match, name=album, tracks=0, total_size=total_size, audio_size=0, old_total_size=0, rating=Album.UNRATED)
					a.save()

				else:

					a = possible_album_match

					# - why True? forcing all albums to update?
					if int(a.tracks) != len(music_files) or int(a.total_size) != int(total_size) or int(a.audio_size) != int(audio_size):

						logger.debug("	Updating this album (hardcoded or for a following reason:)")

						if int(a.tracks) != len(music_files):
							logger.info("Track count: %s/%s" %(int(a.tracks), len(music_files)))
						if int(a.total_size) != int(total_size):
							logger.info("Total size: %s/%s" %(int(a.total_size), int(total_size)))
						if int(a.audio_size) != int(audio_size):
							logger.info("Audio size: %s/%s" %(int(a.audio_size), int(audio_size)))

						updated_existing_album = True

						a.tracks = 0
						a.old_total_size = a.total_size
						a.total_size = total_size
						a.audio_size = 0

						# - keep statuses!

						a.save()

						# Song rows are rebuilt below, so clear them now.
						for song in a.song_set.all():
							song.delete()

				# - if new, or we made any changes to the album, rewrite the song records
				# - the songs were already cleared (above) if we updated and naturally empty if new
				if possible_album_match is None or updated_existing_album:

					for f in music_files:

						song_sha1sum = self._get_sha1sum(root, f)

						#logger.debug("%s: %s - %s" %(a.name, f.encode('utf-8'), song_sha1sum))

						s = Song(album=a, name=f, sha1sum=song_sha1sum)
						s.save()

						a.tracks += 1
						a.audio_size += getsize(join(root, f))

						a.save()

					if possible_album_match is None :
						logger.info("Inserted %s %s" %(artist, album))
					elif updated_existing_album:
						logger.info("Updated %s %s" %(artist, album))

			# - when we're all said and done adding, delete albums that cannot be found on disk (if they've never been checked-out)
			albums = Album.objects.filter(albumcheckout__isnull=True)

			for a in albums:

				if not os.path.exists(self._get_storage_path(a)):
					a.delete()
					logger.info("Deleted %s %s" %(a.artist, a.name))

		# NOTE(review): bare except catches everything (incl. KeyboardInterrupt)
		# and only logs — errors are swallowed rather than re-raised.
		except:

			message = str(sys.exc_info()[1])

			logging.error(str(sys.exc_info()[0]))
			logging.error(message)
			traceback.print_tb(sys.exc_info()[2])

	def _get_sha1sum(self, root, filename):
		"""Return the SHA-1 hex digest of root/filename, read in chunks."""
		sha1 = hashlib.sha1()

		with open(join(root, filename), 'rb') as f:
		    while True:
		        data = f.read(BUF_SIZE)
		        if not data:
		            break
		        sha1.update(data)

		song_sha1sum = sha1.hexdigest()

		return song_sha1sum

	def mark_albums(self):
		"""Plan album check-ins/check-outs against the device's free space
		using AlbumManager, honoring sticky albums and a mix-it-up pick."""
		device_free_bytes = self.device.get_free_bytes()
		am = AlbumManager(free_bytes_margin=int(self.free_space_mb)*1024*1024, device_free_bytes=device_free_bytes)

		albums_to_remove = Album.objects.filter(action=Album.REMOVE)
		logger.info("Registering %s previously marked albums to check-in.." % len(albums_to_remove))
		am.checkin_albums(albums_to_remove)

		am.status()

		requested_albums_to_add = Album.objects.filter(action=Album.REQUEST_ADD, sticky=False)
		logger.info("Registering %s previously requested albums to check-out.." % len(requested_albums_to_add))
		for album in requested_albums_to_add:
			if not am.checkout_album(album):
				logger.warn("Rejected checkout of %s/%s" % (album.artist.name, album.name))

		am.status()

		sticky_albums = Album.objects.filter(Q(albumcheckout__return_at__isnull=False) | Q(albumcheckout__isnull=True), sticky=True)
		logger.info("Registering %s sticky albums to check-out.." % len(sticky_albums))
		for album in sticky_albums:
			if not am.checkout_album(album):
				logger.warn("Rejected checkout of sticky %s/%s" % (album.artist.name, album.name))

		am.status()

		albums_to_add = Album.objects.filter(action=Album.ADD, sticky=False)
		logger.info("Registering %s previously marked albums to check-out.." % len(albums_to_add))
		for album in albums_to_add:
			if not am.checkout_album(album):
				logger.warn("Rejected checkout of %s/%s" % (album.artist.name, album.name))

		am.status()

		albums_to_refresh = Album.objects.filter(action=Album.REFRESH)
		logger.info("Registering %s previously marked albums to refresh.." % len(albums_to_refresh))
		for album in albums_to_refresh:
			if not am.refresh_album(album):
				logger.warn("Rejected refresh of %s/%s" %(album.artist.name, album.name))

		am.status()

		# - we want to keep at least one mix-it-up-rated album
		# - if we aren't renewing one, pick one to check out
		any_kept_mixins = Album.objects.filter(action__in=[Album.DONOTHING, Album.REFRESH], rating=Album.MIXITUP, albumcheckout__return_at__isnull=True).exists()

		if not any_kept_mixins:

			new_mixins = Album.objects.filter(action__in=[Album.DONOTHING, Album.REFRESH], rating=Album.MIXITUP, albumcheckout__return_at__isnull=False)
			new_mixin_list = list(new_mixins)

			if len(new_mixin_list) > 0:
				new_mixin = random.choice(new_mixin_list)
				logger.info("Registering a mix-it-up album...")
				# NOTE(review): this logs 'album' (leftover from the loops
				# above), not 'new_mixin' — likely the wrong variable.
				if not am.checkout_album(new_mixin):
					logger.warn("Rejected checkout of %s/%s" % (album.artist.name, album.name))

		loveit_albums = Album.objects.filter(rating=Album.LOVEIT, action=None)
		never_checked_out_albums = Album.objects.filter(albumcheckout__isnull=True, action=None)
		unrated_albums = Album.objects.filter(rating=Album.UNRATED, action=None, albumstatus__isnull=True)
		album_lists = [loveit_albums, never_checked_out_albums, unrated_albums]

		fails = 10

		# Fill any remaining space with random picks until 10 rejections.
		# NOTE(review): if every list empties, random.choice(album_lists)
		# on an empty list would raise IndexError — confirm lists are
		# never all empty in practice.
		while True:
			random_list = random.choice(album_lists)
			if len(random_list) == 0:
				album_lists.remove(random_list)
				continue
			album = random.choice(random_list)
			logger.info("Registering a random album...")
			if not am.checkout_album(album):
				fails = fails - 1
				logger.warn("Rejected checkout of %s/%s - attempts left: %s" % (album.artist.name, album.name, fails))
			if fails <= 0:
				break

	def copy_files(self):
		"""Apply the plan: remove, refresh (remove+copy), then add albums
		on the device."""
		remove_albums = Album.objects.filter(action=Album.REMOVE)

		for r in remove_albums:
			self.remove_album(r)

		refresh_albums = Album.objects.filter(action=Album.REFRESH)

		for u in refresh_albums:
			self.remove_album(u)
			self.copy_album_to_device(u)

		add_albums = Album.objects.filter(action=Album.ADD)

		for a in add_albums:
			self.copy_album_to_device(a)

	def remove_album(self, a):
		"""Delete album a's folder from the device via ssh and, on success,
		mark its current checkout as returned."""
		logger.info("removing: %s %s" %(a.artist.name, a.name))

		rm_statement = ['ssh', '%s@%s' % (self.ssh_username, self.device_hostname), 'rm -rf "%s"' %(join(self.device_mount, self.beats_target_folder, a.artist.name, a.name))]
		ps = subprocess.Popen(rm_statement)
		(out,err,) = ps.communicate(None)

		logger.info("Remove code: %s" % ps.returncode)

		if ps.returncode == 0:
			current_checkout = a.current_albumcheckout()
			if current_checkout:
				current_checkout.return_at = datetime.datetime.now()
				current_checkout.save()
			else:
				logger.warn("removing, but not checked out????")
			a.action = None
			a.save()

	def pull_photos(self):
		"""Copy each configured source path from the device to its paired
		local target path via scp."""
		pull_pairs = zip(self.sources.split(','), self.targets.split(','))

		for source, target in pull_pairs:

			logger.info("Copying %s to %s" %(source, target))
			source_uri = '%s@%s:%s' %(self.ssh_username, self.device_hostname, source)

			copy_statement = ['scp', '-r', '-p', source_uri, target]
			ps = subprocess.Popen(copy_statement, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
			(out,err,) = ps.communicate(None)

			logger.info(out)

			if err:
				logger.error(err)

			logger.debug("Copy code: %s" % ps.returncode)

	def copy_album_to_device(self, a):
		"""scp album a's folder to the device and, on success, record a new
		AlbumCheckout and clear the album's pending action."""
		artist_folder = os.path.join(self.beats_target_folder, a.artist.name) #join(self.device_mount, self.beats_target_folder, a.artist)

		logger.info("adding folder: %s" %(artist_folder))

		# Create the artist folder remotely before copying into it.
		mkdir_statement = ['ssh', '%s@%s' %(self.ssh_username, self.device_hostname), 'mkdir -p "%s"' % artist_folder]
		logger.info(mkdir_statement)

		ps = subprocess.Popen(mkdir_statement, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
		(out,err,) = ps.communicate(None)

		logger.debug('out: %s' % out)
		logger.debug('err: %s' % err)

		cp_statement = ['scp', '-r', self._get_storage_path(a), '%s@%s:"%s"' %(self.ssh_username, self.device_hostname, artist_folder)]
		logger.info(cp_statement)

		ps = subprocess.Popen(cp_statement, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
		(out,err,) = ps.communicate(None)

		logger.debug('out: %s' % out)
		logger.debug('err: %s' % err)

		logger.info("Add code: %s" % ps.returncode)

		if ps.returncode == 0:
			ac = AlbumCheckout(album=a, checkout_at=datetime.datetime.now())
			ac.save()
			a.action = None
			a.save()

	def _get_storage_path(self, album):
		"""Return the local on-disk path for album; 'no album' albums live
		directly in the artist folder."""
		if album.name == 'no album':
			return join(self.music_base_path, self.music_folder, album.artist.name)

		return join(self.music_base_path, self.music_folder, album.artist.name, album.name)
Пример #11
0
    def __init__(self,
                 mode,
                 iterator,
                 params,
                 rev_vocab_table=None,
                 scope=None,
                 log_trainables=True):
        """Build the seq2seq TF graph for the given mode.

        :param mode: one of tf.contrib.learn.ModeKeys (TRAIN/EVAL/INFER);
            controls which loss/inference ops are built
        :param iterator: dataset iterator providing source/target batches
        :param params: hyperparameter object (embedding_size, num_layers,
            optimizer, learning_rate, ...)
        :param rev_vocab_table: id->word lookup table, required for INFER
            (used to map sampled ids back to words)
        :param scope: optional variable scope name
        :param log_trainables: if True, print all trainable variables
        """
        print_out("# creating %s graph ..." % mode)
        self.dtype = tf.float32

        self.mode = mode
        self.embedding_size = params.embedding_size
        self.num_layers = params.num_layers
        self.iterator = iterator

        # self.scheduled_sampling_prob = scheduled_sampling_prob
        # self.num_samples_for_loss = num_samples_for_loss

        # GPU placement helpers; devices are assigned round-robin.
        self.device_manager = DeviceManager()
        self.round_robin = RoundRobin(self.device_manager)
        self.num_gpus = self.device_manager.num_available_gpus()
        print_out("# number of gpus %d" % self.num_gpus)

        with tf.variable_scope(scope or 'ta_seq2seq_graph', dtype=self.dtype):
            self.init_embeddings(params.vocab_file,
                                 params.embedding_type,
                                 self.embedding_size,
                                 scope=scope)

            with tf.variable_scope(scope or "build_network"):
                with tf.variable_scope("output_projection") as output_scope:
                    # Output layer: topic-aware joint projection when topic
                    # generation boosting is enabled, plain dense otherwise.
                    if params.boost_topic_gen_prob:
                        self.output_layer = taware_layer.JointDenseLayer(
                            params.vocab_size,
                            params.topic_vocab_size,
                            scope=output_scope,
                            name="output_projection")
                    else:
                        self.output_layer = layers_core.Dense(
                            params.vocab_size,
                            # activation=tf.nn.tanh,
                            use_bias=False,
                            name="output_projection")

            encoder_keep_prob, decoder_keep_prob = self.get_keep_probs(
                mode, params)
            self.batch_size = tf.size(self.iterator.source_sequence_lengths)

            encoder_outputs, encoder_state = self.__build_encoder(
                params, encoder_keep_prob)

            logits, sample_id, final_decoder_state = self.__build_decoder(
                params, encoder_outputs, encoder_state, decoder_keep_prob)

            # Loss is only needed for TRAIN/EVAL; placed on the last GPU.
            if mode != tf.contrib.learn.ModeKeys.INFER:
                with tf.device(self.device_manager.tail_gpu()):
                    loss = self.__compute_loss(logits)
            else:
                loss = None

            if mode == tf.contrib.learn.ModeKeys.TRAIN:
                self.train_loss = loss
                self.word_count = tf.reduce_sum(
                    self.iterator.source_sequence_lengths) + tf.reduce_sum(
                        self.iterator.target_sequence_length)
            elif mode == tf.contrib.learn.ModeKeys.EVAL:
                self.eval_loss = loss
            elif mode == tf.contrib.learn.ModeKeys.INFER:
                self.sample_words = rev_vocab_table.lookup(
                    tf.to_int64(sample_id))

            if mode != tf.contrib.learn.ModeKeys.INFER:
                ## Count the number of predicted words for compute ppl.
                self.predict_count = tf.reduce_sum(
                    self.iterator.target_sequence_length)

            self.global_step = tf.Variable(0, trainable=False)
            trainables = tf.trainable_variables()

            # Gradients and SGD update operation for training the model.
            # Arrage for the embedding vars to appear at the beginning.
            if mode == tf.contrib.learn.ModeKeys.TRAIN:
                self.learning_rate = tf.constant(params.learning_rate)
                # decay
                self.learning_rate = self._get_learning_rate_decay(
                    params, self.global_step, self.learning_rate)

                # Optimizer
                if params.optimizer.lower() == "sgd":
                    opt = tf.train.GradientDescentOptimizer(self.learning_rate)
                    tf.summary.scalar("lr", self.learning_rate)
                elif params.optimizer.lower() == "adam":
                    opt = tf.train.AdamOptimizer(self.learning_rate)
                    tf.summary.scalar("lr", self.learning_rate)
                else:
                    raise ValueError('Unknown optimizer: ' + params.optimizer)

                # Gradients (clipped by global norm to stabilize training)
                gradients = tf.gradients(self.train_loss,
                                         trainables,
                                         colocate_gradients_with_ops=True)

                clipped_grads, grad_norm = tf.clip_by_global_norm(
                    gradients, params.max_gradient_norm)
                grad_norm_summary = [tf.summary.scalar("grad_norm", grad_norm)]
                grad_norm_summary.append(
                    tf.summary.scalar("clipped_gradient",
                                      tf.global_norm(clipped_grads)))

                self.grad_norm = grad_norm

                self.update = opt.apply_gradients(zip(clipped_grads,
                                                      trainables),
                                                  global_step=self.global_step)

                # Summary
                self.train_summary = tf.summary.merge([
                    tf.summary.scalar("lr", self.learning_rate),
                    tf.summary.scalar("train_loss", self.train_loss),
                ] + grad_norm_summary)

            if mode == tf.contrib.learn.ModeKeys.INFER:
                self.infer_logits, self.sample_id = logits, sample_id
                self.infer_summary = tf.no_op()

            # Saver
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)

            # Print trainable variables
            if log_trainables:
                print_out("# Trainable variables")
                for trainable in trainables:
                    print_out("  %s, %s, %s" %
                              (trainable.name, str(
                                  trainable.get_shape()), trainable.op.device))
Пример #12
0
class TopicAwareSeq2SeqModel(AbstractModel):
    """Sequence-to-sequence model with/without attention mechanism and for multiple buckets.
    """
    def __init__(self,
                 mode,
                 iterator,
                 params,
                 rev_vocab_table=None,
                 scope=None,
                 log_trainables=True):
        """Build the full topic-aware seq2seq graph for the given mode.

        Args:
            mode: one of tf.contrib.learn.ModeKeys (TRAIN / EVAL / INFER);
                selects which loss/update/inference ops get built.
            iterator: input pipeline object exposing sources, targets, topic
                ids and their sequence-length tensors (project type).
            params: hyperparameter namespace (vocab sizes, layer counts,
                optimizer settings, beam width, ...).
            rev_vocab_table: id->word lookup table; only used in INFER mode
                to map sampled ids back to words.
            scope: optional variable scope name (defaults to
                'ta_seq2seq_graph').
            log_trainables: when True, print every trainable variable at the
                end of construction.
        """
        print_out("# creating %s graph ..." % mode)
        self.dtype = tf.float32

        self.mode = mode
        self.embedding_size = params.embedding_size
        self.num_layers = params.num_layers
        self.iterator = iterator

        # self.scheduled_sampling_prob = scheduled_sampling_prob
        # self.num_samples_for_loss = num_samples_for_loss

        # GPU placement helpers: cells are spread across available GPUs
        # round-robin (see __build_encoder / __build_decoder_cell).
        self.device_manager = DeviceManager()
        self.round_robin = RoundRobin(self.device_manager)
        self.num_gpus = self.device_manager.num_available_gpus()
        print_out("# number of gpus %d" % self.num_gpus)

        with tf.variable_scope(scope or 'ta_seq2seq_graph', dtype=self.dtype):
            self.init_embeddings(params.vocab_file,
                                 params.embedding_type,
                                 self.embedding_size,
                                 scope=scope)

            with tf.variable_scope(scope or "build_network"):
                with tf.variable_scope("output_projection") as output_scope:
                    # Output projection: either the topic-boosting joint
                    # layer or a plain dense projection onto the vocabulary.
                    if params.boost_topic_gen_prob:
                        self.output_layer = taware_layer.JointDenseLayer(
                            params.vocab_size,
                            params.topic_vocab_size,
                            scope=output_scope,
                            name="output_projection")
                    else:
                        self.output_layer = layers_core.Dense(
                            params.vocab_size,
                            # activation=tf.nn.tanh,
                            use_bias=False,
                            name="output_projection")

            encoder_keep_prob, decoder_keep_prob = self.get_keep_probs(
                mode, params)
            self.batch_size = tf.size(self.iterator.source_sequence_lengths)

            # Encoder -> decoder wiring.
            encoder_outputs, encoder_state = self.__build_encoder(
                params, encoder_keep_prob)

            logits, sample_id, final_decoder_state = self.__build_decoder(
                params, encoder_outputs, encoder_state, decoder_keep_prob)

            if mode != tf.contrib.learn.ModeKeys.INFER:
                # Place the (potentially large) softmax/loss on the last GPU.
                with tf.device(self.device_manager.tail_gpu()):
                    loss = self.__compute_loss(logits)
            else:
                loss = None

            if mode == tf.contrib.learn.ModeKeys.TRAIN:
                self.train_loss = loss
                # Total source+target tokens per batch (throughput metric).
                self.word_count = tf.reduce_sum(
                    self.iterator.source_sequence_lengths) + tf.reduce_sum(
                        self.iterator.target_sequence_length)
            elif mode == tf.contrib.learn.ModeKeys.EVAL:
                self.eval_loss = loss
            elif mode == tf.contrib.learn.ModeKeys.INFER:
                self.sample_words = rev_vocab_table.lookup(
                    tf.to_int64(sample_id))

            if mode != tf.contrib.learn.ModeKeys.INFER:
                ## Count the number of predicted words for compute ppl.
                self.predict_count = tf.reduce_sum(
                    self.iterator.target_sequence_length)

            self.global_step = tf.Variable(0, trainable=False)
            trainables = tf.trainable_variables()

            # Gradients and SGD update operation for training the model.
            # Arrage for the embedding vars to appear at the beginning.
            if mode == tf.contrib.learn.ModeKeys.TRAIN:
                self.learning_rate = tf.constant(params.learning_rate)
                # decay
                self.learning_rate = self._get_learning_rate_decay(
                    params, self.global_step, self.learning_rate)

                # Optimizer
                if params.optimizer.lower() == "sgd":
                    opt = tf.train.GradientDescentOptimizer(self.learning_rate)
                    tf.summary.scalar("lr", self.learning_rate)
                elif params.optimizer.lower() == "adam":
                    opt = tf.train.AdamOptimizer(self.learning_rate)
                    tf.summary.scalar("lr", self.learning_rate)
                else:
                    raise ValueError('Unknown optimizer: ' + params.optimizer)

                # Gradients
                gradients = tf.gradients(self.train_loss,
                                         trainables,
                                         colocate_gradients_with_ops=True)

                # Global-norm clipping; both the raw and clipped norms are
                # logged so exploding gradients are visible in TensorBoard.
                clipped_grads, grad_norm = tf.clip_by_global_norm(
                    gradients, params.max_gradient_norm)
                grad_norm_summary = [tf.summary.scalar("grad_norm", grad_norm)]
                grad_norm_summary.append(
                    tf.summary.scalar("clipped_gradient",
                                      tf.global_norm(clipped_grads)))

                self.grad_norm = grad_norm

                self.update = opt.apply_gradients(zip(clipped_grads,
                                                      trainables),
                                                  global_step=self.global_step)

                # Summary
                self.train_summary = tf.summary.merge([
                    tf.summary.scalar("lr", self.learning_rate),
                    tf.summary.scalar("train_loss", self.train_loss),
                ] + grad_norm_summary)

            if mode == tf.contrib.learn.ModeKeys.INFER:
                self.infer_logits, self.sample_id = logits, sample_id
                self.infer_summary = tf.no_op()

            # Saver
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)

            # Print trainable variables
            if log_trainables:
                print_out("# Trainable variables")
                for trainable in trainables:
                    print_out("  %s, %s, %s" %
                              (trainable.name, str(
                                  trainable.get_shape()), trainable.op.device))

    def __build_encoder(self, params, keep_prob):
        """Build the source-message encoder.

        Supports a unidirectional ("uni") or bidirectional ("bi") RNN stack,
        selected by params.encoder_type. Returns the pair
        (encoder_outputs, encoder_state).

        Raises:
            ValueError: if params.encoder_type is neither "uni" nor "bi".
        """
        with variable_scope.variable_scope("encoder"):
            embedded_sources = tf.nn.embedding_lookup(
                params=self.embeddings, ids=self.iterator.sources)
            source_lengths = self.iterator.source_sequence_lengths

            if params.encoder_type == "uni":
                print_out("  build unidirectional encoder num_layers = %d" %
                          params.num_layers)
                uni_cell = create_cell(params.cell_type,
                                       params.hidden_units,
                                       self.num_layers,
                                       input_keep_prob=keep_prob,
                                       devices=self.round_robin.assign(
                                           self.num_layers))
                outputs, state = tf.nn.dynamic_rnn(
                    uni_cell,
                    inputs=embedded_sources,
                    sequence_length=source_lengths,
                    dtype=self.dtype,
                    swap_memory=True)
                return outputs, state

            if params.encoder_type == "bi":
                num_bi_layers = int(params.num_layers / 2)
                print_out("  build bidirectional encoder num_layers = %d" %
                          params.num_layers)

                fw_cell = create_cell(
                    params.cell_type,
                    params.hidden_units,
                    num_bi_layers,
                    input_keep_prob=keep_prob,
                    devices=self.round_robin.assign(num_bi_layers))
                # Backward cells start allocation from the last GPU so the
                # two directions spread over different devices.
                bw_cell = create_cell(
                    params.cell_type,
                    params.hidden_units,
                    num_bi_layers,
                    input_keep_prob=keep_prob,
                    devices=self.round_robin.assign(
                        num_bi_layers,
                        self.device_manager.num_available_gpus() - 1))

                outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(
                    fw_cell,
                    bw_cell,
                    embedded_sources,
                    dtype=self.dtype,
                    sequence_length=source_lengths,
                    swap_memory=True)

                if num_bi_layers == 1:
                    state = bi_state
                else:
                    # Interleave forward/backward states layer by layer:
                    # (fw_0, bw_0, fw_1, bw_1, ...).
                    state = tuple(bi_state[direction][layer_id]
                                  for layer_id in range(num_bi_layers)
                                  for direction in (0, 1))
                return outputs, state

            raise ValueError("Unknown encoder type: %s" % params.encoder_type)

    def __build_decoder_cell(self, params, encoder_outputs, encoder_state,
                             keep_prob):
        """Create the joint-attention decoder cell and its initial state.

        Wraps a multi-layer RNN cell with a two-headed AttentionWrapper:
        one attention mechanism over the encoder memory and one over the
        topic-word embeddings (each topic embedding is concatenated with the
        aggregated encoder state first). In beam-search inference mode every
        attention input is tiled beam_width times.

        Returns:
            (cell, decoder_initial_state)
        """
        cell = create_cell(params.cell_type,
                           params.hidden_units,
                           self.num_layers,
                           input_keep_prob=keep_prob,
                           devices=self.round_robin.assign(self.num_layers))

        topical_embeddings = tf.nn.embedding_lookup(self.embeddings,
                                                    self.iterator.topic)

        max_topic_length = tf.reduce_max(self.iterator.topic_sequence_length)

        # Collapse a multi-layer tuple state into a single tensor by
        # concatenating the per-layer states along the feature axis.
        aggregated_state = encoder_state
        if isinstance(encoder_state, tuple):
            aggregated_state = encoder_state[0]
            for state in encoder_state[1:]:
                aggregated_state = tf.concat([aggregated_state, state], axis=1)

        # Bidirectional encoders return a (fw, bw) output tuple; merge it
        # the same way so the attention memory is a single tensor.
        if isinstance(encoder_outputs, tuple):
            aggregated_outputs = encoder_outputs[0]
            for output in encoder_outputs[1:]:
                aggregated_outputs = tf.concat([aggregated_outputs, output],
                                               axis=1)

            encoder_outputs = aggregated_outputs

        # Prefix every topic-word embedding with the aggregated encoder
        # state so topical attention is conditioned on the message.
        expanded_encoder_state = tf.tile(
            tf.expand_dims(aggregated_state, axis=1), [1, max_topic_length, 1])
        topical_embeddings = tf.concat(
            [expanded_encoder_state, topical_embeddings], axis=2)

        if self.mode == tf.contrib.learn.ModeKeys.INFER and params.beam_width > 0:
            # Beam search: replicate batch entries beam_width times.
            batch_size = self.batch_size * params.beam_width

            if isinstance(encoder_state, tuple):
                decoder_initial_state = tuple([
                    tf.contrib.seq2seq.tile_batch(state,
                                                  multiplier=params.beam_width)
                    for state in encoder_state
                ])
            else:
                decoder_initial_state = tf.contrib.seq2seq.tile_batch(
                    encoder_state, multiplier=params.beam_width)

            memory = tf.contrib.seq2seq.tile_batch(
                encoder_outputs, multiplier=params.beam_width)
            topical_embeddings = tf.contrib.seq2seq.tile_batch(
                topical_embeddings, multiplier=params.beam_width)
            source_sequence_length = tf.contrib.seq2seq.tile_batch(
                self.iterator.source_sequence_lengths,
                multiplier=params.beam_width)
            topic_sequence_length = tf.contrib.seq2seq.tile_batch(
                self.iterator.topic_sequence_length,
                multiplier=params.beam_width)
        else:
            batch_size = self.batch_size
            decoder_initial_state = encoder_state
            memory = encoder_outputs
            source_sequence_length = self.iterator.source_sequence_lengths
            topic_sequence_length = self.iterator.topic_sequence_length

        message_attention = create_attention_mechanism(params.attention_type,
                                                       params.hidden_units,
                                                       memory,
                                                       source_sequence_length)

        topical_attention = create_attention_mechanism(params.attention_type,
                                                       params.hidden_units,
                                                       topical_embeddings,
                                                       topic_sequence_length)

        # Alignment history only works for greedy inference (beam search
        # reorders the batch, which breaks the history TensorArray).
        alignment_history = self.mode == tf.contrib.learn.ModeKeys.INFER and params.beam_width == 0
        cell = tf.contrib.seq2seq.AttentionWrapper(
            cell,
            attention_mechanism=(message_attention, topical_attention),
            attention_layer_size=(params.hidden_units, params.hidden_units),
            alignment_history=alignment_history,
            output_attention=True,
            name="joint_attention")

        decoder_initial_state = cell.zero_state(
            batch_size, self.dtype).clone(cell_state=decoder_initial_state)

        return cell, decoder_initial_state

    def __build_decoder(self, params, encoder_outputs, encoder_state,
                        keep_prob):
        """Build the decoder sub-graph.

        TRAIN/EVAL: teacher-forced decoding over the target inputs.
        INFER: beam search when params.beam_width > 0, greedy otherwise;
        decoding length is capped at decoding_length_factor times the
        longest source sequence in the batch.

        Returns:
            (logits, sample_id, final_decoder_state); logits is tf.no_op()
            when beam search is used (beam decoders emit no rnn_output).
        """
        iterator = self.iterator
        with variable_scope.variable_scope("decoder") as decoder_scope:
            cell, initial_state = self.__build_decoder_cell(
                params, encoder_outputs, encoder_state, keep_prob)

            if self.mode != tf.contrib.learn.ModeKeys.INFER:
                # decoder_emp_inp: [max_time, batch_size, num_units]
                decoder_emb_inp = tf.nn.embedding_lookup(
                    self.embeddings, iterator.target_input)

                # Helper
                helper = tf.contrib.seq2seq.TrainingHelper(
                    decoder_emb_inp, iterator.target_sequence_length)

                # Decoder
                my_decoder = taware_decoder.ConservativeBasicDecoder(
                    cell, helper, initial_state, self.output_layer)

                # Dynamic decoding
                outputs, final_decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(
                    my_decoder, swap_memory=True, scope=decoder_scope)

                sample_id = outputs.sample_id
                logits = outputs.rnn_output

                # Note: there's a subtle difference here between train and inference.
                # We could have set output_layer when create my_decoder
                #   and shared more code between train and inference.
                # We chose to apply the output_layer to all timesteps for speed:
                #   10% improvements for small models & 20% for larger ones.
                # If memory is a concern, we should apply output_layer per timestep.
                # with tf.device(self.device_manager.tail_gpu()):
                #     logits = self.output_layer(outputs.rnn_output)

            ### Inference
            else:
                beam_width = params.beam_width
                # Every sequence starts from SOS and stops at EOS.
                start_tokens = tf.fill([self.batch_size], vocab.SOS_ID)
                end_token = vocab.EOS_ID

                # Cap decoding length relative to the longest source.
                decoding_length_factor = params.decoding_length_factor
                max_encoder_length = tf.reduce_max(
                    iterator.source_sequence_lengths)
                maximum_iterations = tf.to_int32(
                    tf.round(
                        tf.to_float(max_encoder_length) *
                        decoding_length_factor))

                if beam_width > 0:
                    my_decoder = taware_decoder.ConservativeBeamSearchDecoder(
                        cell,
                        self.embeddings,
                        start_tokens,
                        end_token,
                        initial_state=initial_state,
                        beam_width=beam_width,
                        output_layer=self.output_layer,
                        length_penalty_weight=params.length_penalty_weight)
                else:
                    helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
                        self.embeddings, start_tokens, end_token)

                    # Decoder
                    my_decoder = taware_decoder.ConservativeBasicDecoder(
                        cell,
                        helper,
                        initial_state,
                        output_layer=self.output_layer  # applied per timestep
                    )

                # Dynamic decoding
                outputs, final_decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(
                    my_decoder,
                    maximum_iterations=maximum_iterations,
                    swap_memory=True,
                    scope=decoder_scope)

                if beam_width > 0:
                    logits = tf.no_op()
                    sample_id = outputs.predicted_ids
                else:
                    logits = outputs.rnn_output
                    sample_id = outputs.sample_id

        return logits, sample_id, final_decoder_state

    def __compute_loss(self, logits):
        """Return the per-example mean cross-entropy over target tokens.

        Padding positions are masked out with a sequence mask before the
        sum, and the total is normalized by the batch size.
        """
        targets = self.iterator.target_output
        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=targets, logits=logits)

        # Prefer the static time dimension; fall back to the dynamic shape.
        max_time = targets.shape[1].value or tf.shape(targets)[1]
        mask = tf.sequence_mask(self.iterator.target_sequence_length,
                                max_time,
                                dtype=logits.dtype)

        total = tf.reduce_sum(crossent * mask)
        return total / tf.to_float(self.batch_size)

    def train(self, sess):
        """Run one training step; only valid when built in TRAIN mode.

        Returns the fetched values in order: update op result, train loss,
        predicted-word count, train summary, global step, word count,
        batch size, gradient norm, learning rate.
        """
        assert self.mode == tf.contrib.learn.ModeKeys.TRAIN

        fetches = [
            self.update,
            self.train_loss,
            self.predict_count,
            self.train_summary,
            self.global_step,
            self.word_count,
            self.batch_size,
            self.grad_norm,
            self.learning_rate,
        ]
        return sess.run(fetches)

    def eval(self, sess):
        """Run one evaluation step; only valid when built in EVAL mode.

        Returns [eval_loss, predict_count, batch_size].
        """
        assert self.mode == tf.contrib.learn.ModeKeys.EVAL

        fetches = [self.eval_loss, self.predict_count, self.batch_size]
        return sess.run(fetches)

    def infer(self, sess):
        """Run one inference step; only valid when built in INFER mode.

        Returns [infer_logits, infer_summary, sample_id, sample_words].
        """
        assert self.mode == tf.contrib.learn.ModeKeys.INFER

        fetches = [
            self.infer_logits,
            self.infer_summary,
            self.sample_id,
            self.sample_words,
        ]
        return sess.run(fetches)

    def decode(self, sess):
        """Decode one batch and return (sample_words, infer_summary).

        Beam-search results arrive as [batch_size, time, beam_width]; they
        are transposed to [beam_width, batch_size, time] so callers can
        index by beam first.
        """
        _, summary, _, words = self.infer(sess)
        if words.ndim == 3:
            words = words.transpose([2, 0, 1])

        return words, summary