Example #1
    def _check_hardlinks_( self ):
        tmp_path = os.path.join( self.path, 'driveinfo.tmp' )
        tools.make_dirs( tmp_path )
        if not os.path.isdir( tmp_path ):
            return False

        file1_path = os.path.join( tmp_path, 'file1' )
        file2_path = os.path.join( tmp_path, 'file2' )

        ret_val = False

        os.system( "echo abc > \"%s\"" % file1_path )
        os.system( "ln \"%s\" \"%s\"" % ( file1_path, file2_path ) )
        os.system( "echo abc > \"%s\"" % file2_path )

        if os.path.exists( file1_path ) and os.path.exists( file2_path ):
            try:
                info1 = os.stat( file1_path )
                info2 = os.stat( file2_path )

                if info1.st_size == info2.st_size:
                    ret_val = True
            except OSError:
                pass

        os.system( "rm -rf \"%s\"" % tmp_path )
        return ret_val
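
Note that the size comparison above cannot actually distinguish hard links: both files end up containing the same three bytes whether or not the ln call succeeded. A tighter check, sketched below as an alternative (not the project's code), creates the link with os.link and compares inode numbers instead of shelling out:

    import os

    def check_hardlinks(tmp_path):
        """Sketch: return True if tmp_path's filesystem supports hard links."""
        file1 = os.path.join(tmp_path, 'file1')
        file2 = os.path.join(tmp_path, 'file2')
        with open(file1, 'w') as f:
            f.write('abc\n')
        try:
            os.link(file1, file2)  # create the hard link directly
            # same inode number means a real hard link
            return os.stat(file1).st_ino == os.stat(file2).st_ino
        except OSError:
            return False
        finally:
            for p in (file1, file2):
                if os.path.exists(p):
                    os.remove(p)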
Example #2
    def backup_config(self):
        """
        Create a backup of the encfs config file in the local config folder,
        so that if the config file gets deleted or corrupted, the user can
        restore it from there.
        """
        cfg = self.get_config_file()
        if not os.path.isfile(cfg):
            logger.warning(
                'No encfs config in %s. Skip backup of config file.' % cfg,
                self)
            return
        backup_folder = self.config.get_encfsconfig_backup_folder(
            self.profile_id)
        tools.make_dirs(backup_folder)
        old_backups = os.listdir(backup_folder)
        old_backups.sort(reverse=True)
        if len(old_backups):
            last_backup = os.path.join(backup_folder, old_backups[0])

            #don't create a new backup if config hasn't changed
            if tools._get_md5sum_from_path(cfg) == \
               tools._get_md5sum_from_path(last_backup):
                logger.debug('Encfs config did not change. Skip backup', self)
                return

        new_backup_file = '.'.join(
            (os.path.basename(cfg), datetime.now().strftime('%Y%m%d%H%M')))
        new_backup = os.path.join(backup_folder, new_backup_file)
        logger.debug(
            'Create backup of encfs config %s to %s' % (cfg, new_backup), self)
        shutil.copy2(cfg, new_backup)
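
tools._get_md5sum_from_path is used above but not shown in these listings. A plausible implementation, assuming it simply returns the hex MD5 digest of the file's contents, would be:

    import hashlib

    def _get_md5sum_from_path(path):
        """Sketch: hex MD5 digest of the file at path, read in chunks."""
        md5 = hashlib.md5()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(65536), b''):
                md5.update(chunk)
        return md5.hexdigest()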
Example #3
    def backup_config(self):
        """
        Create a backup of the encfs config file in the local config folder,
        so that if the config file gets deleted or corrupted, the user can
        restore it from there.
        """
        cfg = self.get_config_file()
        if not os.path.isfile(cfg):
            logger.warning('No encfs config in %s. Skip backup of config file.' % cfg, self)
            return
        backup_folder = self.config.get_encfsconfig_backup_folder(self.profile_id)
        tools.make_dirs(backup_folder)
        old_backups = os.listdir(backup_folder)
        old_backups.sort(reverse=True)
        if len(old_backups):
            last_backup = os.path.join(backup_folder, old_backups[0])

            #don't create a new backup if config hasn't changed
            if tools._get_md5sum_from_path(cfg) == \
               tools._get_md5sum_from_path(last_backup):
                logger.debug('Encfs config did not change. Skip backup', self)
                return

        new_backup_file = '.'.join((os.path.basename(cfg), datetime.now().strftime('%Y%m%d%H%M')))
        new_backup = os.path.join(backup_folder, new_backup_file)
        logger.debug('Create backup of encfs config %s to %s'
                     %(cfg, new_backup), self)
        shutil.copy2(cfg, new_backup)
Example #4
    def _check_hardlinks_(self):
        tmp_path = os.path.join(self.path, 'driveinfo.tmp')
        tools.make_dirs(tmp_path)
        if not os.path.isdir(tmp_path):
            return False

        file1_path = os.path.join(tmp_path, 'file1')
        file2_path = os.path.join(tmp_path, 'file2')

        ret_val = False

        os.system("echo abc > \"%s\"" % file1_path)
        os.system("ln \"%s\" \"%s\"" % (file1_path, file2_path))
        os.system("echo abc > \"%s\"" % file2_path)

        if os.path.exists(file1_path) and os.path.exists(file2_path):
            try:
                info1 = os.stat(file1_path)
                info2 = os.stat(file2_path)

                if info1.st_size == info2.st_size:
                    ret_val = True
            except OSError:
                pass

        os.system("rm -rf \"%s\"" % tmp_path)
        return ret_val
Example #5
def demo(config):
    # init loaders and base
    loaders = CustomedLoaders(config)
    base = DemoBase(config)

    # visualization
    base.resume_from_model(config.resume_visualize_model)
    make_dirs(config.visualize_output_path)
    visualize(config, base, loaders)
Example #6
	def _create_directory( self, folder ):
		tools.make_dirs( folder )

		if not os.path.exists( folder ):
			logger.error( "Can't create directory: %s" % folder )
			self.set_take_snapshot_message( 1, _('Can\'t create directory: %s') % folder )
			os.system( 'sleep 2' ) #max 1 backup / second
			return False

		return True
Example #7
	def __init__( self ):
		self._APP_PATH = os.path.abspath( os.path.dirname( __file__ ) )
		self._DOC_PATH = '/usr/share/doc/backintime'
		if os.path.exists( os.path.join( self._APP_PATH, 'LICENSE' ) ):
			self._DOC_PATH = self._APP_PATH

		self._GLOBAL_CONFIG_PATH = '/etc/backintime/config2'

		HOME_FOLDER = os.path.expanduser( '~' )
		self._LOCAL_DATA_FOLDER = os.path.join( os.getenv( 'XDG_DATA_HOME', '$HOME/.local/share' ).replace( '$HOME', HOME_FOLDER ), 'backintime' )
		self._LOCAL_CONFIG_FOLDER = os.path.join( os.getenv( 'XDG_CONFIG_HOME', '$HOME/.config' ).replace( '$HOME', HOME_FOLDER ), 'backintime' )

		#self._LOCAL_CONFIG_FOLDER = os.path.expanduser( '~/.config/backintime' )
		tools.make_dirs( self._LOCAL_CONFIG_FOLDER )
		tools.make_dirs( self._LOCAL_DATA_FOLDER )

		self._LOCAL_CONFIG_PATH = os.path.join( self._LOCAL_CONFIG_FOLDER, 'config2' )

		self.load( self._GLOBAL_CONFIG_PATH )
		self.append( self._LOCAL_CONFIG_PATH )

		OLD_CONFIG_PATH = os.path.join( self._LOCAL_CONFIG_FOLDER, 'config' )
		if os.path.exists( OLD_CONFIG_PATH ): 
			#import old config
			old_config = configfile.ConfigFile()
			old_config.load( OLD_CONFIG_PATH )

			dict = {
				'BASE_BACKUP_PATH' : 'snapshots.path',
				'INCLUDE_FOLDERS' : 'snapshots.include_folders',
				'EXCLUDE_PATTERNS' : 'snapshots.exclude_patterns',
				'AUTOMATIC_BACKUP' : 'snapshots.automatic_backup_mode',
				'REMOVE_OLD_BACKUPS' : 'snapshots.remove_old_snapshots.enabled',
				'REMOVE_OLD_BACKUPS_VALUE' : 'snapshots.remove_old_snapshots.value',
				'REMOVE_OLD_BACKUPS_UNIT' : 'snapshots.remove_old_snapshots.unit',
				'MIN_FREE_SPACE' : 'snapshots.min_free_space.enabled',
				'MIN_FREE_SPACE_VALUE' : 'snapshots.min_free_space.value',
				'MIN_FREE_SPACE_UNIT' : 'snapshots.min_free_space.unit',
				'DONT_REMOVE_NAMED_SNAPSHOTS' : 'snapshots.dont_remove_named_snapshots',
				'DIFF_CMD' : 'gnome.diff.cmd',
				'DIFF_CMD_PARAMS' : 'gnome.diff.params',
				'LAST_PATH' : 'gnome.last_path',
				'MAIN_WINDOW_X' : 'gnome.main_window.x',
				'MAIN_WINDOW_Y' : 'gnome.main_window.y',
				'MAIN_WINDOW_WIDTH' : 'gnome.main_window.width',
				'MAIN_WINDOW_HEIGHT' : 'gnome.main_window.height',
				'MAIN_WINDOW_HPANED1_POSITION' : 'gnome.main_window.hpaned1',
				'MAIN_WINDOW_HPANED2_POSITION' : 'gnome.main_window.hpaned2'
			}

			if self.get_if_dont_exists( dict, old_config ):
				self.save()

			os.system( "rm \"%s\"" % OLD_CONFIG_PATH )
Example #8
    def _create_directory(self, folder):
        tools.make_dirs(folder)

        if not os.path.exists(folder):
            logger.error("Can't create directory: %s" % folder)
            self.set_take_snapshot_message(
                1,
                _('Can\'t create directory: %s') % folder)
            os.system('sleep 2')  #max 1 backup / second
            return False

        return True
Example #9
    def init(self):
        tools.remove_files(self.experiment_dir)
        tools.make_dirs(self.experiment_dir)
        tools.make_file(self.setting.command_log)
        if check.check_file_exist(self.setting.command_log):
            tools.empty_file(self.setting.command_log)

        log.logger_experiment.info('unzipping file ' +
                                   self.experiment_file_name)
        tools.unzip(self.experiment_file_name, self.experiment_dir)
        ack_data = {'state': 0}
        return ack_data
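
tools.make_file and tools.empty_file are not shown either. Minimal sketches, assuming the obvious touch and truncate semantics, could look like this:

    def make_file(path):
        """Sketch: create an empty file if it does not exist (like touch)."""
        open(path, 'a').close()

    def empty_file(path):
        """Sketch: truncate an existing file to zero length."""
        open(path, 'w').close()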
Example #10
def main(config):

    # init loaders and base
    loaders = ReIDLoaders(config)
    base = Base(config)

    # make directions
    make_dirs(base.output_path)

    # init logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    assert config.mode in ['train', 'test', 'visualize']
    if config.mode == 'train':  # train mode

        # automatically resume model from the latest one
        start_train_epoch = 0
        if config.auto_resume_training_from_lastest_steps:
            start_train_epoch = base.resume_last_model()

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):
            # save model
            base.save_model(current_epoch)
            # train
            _, results = train_an_epoch(config, base, loaders, current_epoch)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

        # test
        base.save_model(config.total_train_epochs)
        mAP, CMC, pres, recalls, thresholds = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))
        plot_prerecall_curve(config, pres, recalls, thresholds, mAP, CMC,
                             'none')

    elif config.mode == 'test':  # test mode
        base.resume_from_model(config.resume_test_model)
        mAP, CMC, pres, recalls, thresholds = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))
        logger(
            'Time: {}; Test Dataset: {}, \nprecision: {} \nrecall: {}\nthresholds: {}'
            .format(time_now(), config.test_dataset, pres, recalls, thresholds))
        plot_prerecall_curve(config, pres, recalls, thresholds, mAP, CMC,
                             'none')

    elif config.mode == 'visualize':  # visualization mode
        base.resume_from_model(config.resume_visualize_model)
        visualize(config, base, loaders)
Example #11
def updateSrv(*args):
    """
	unzip the zip file for deployserver or copyXml to /app/update/ of remote IP 
	args:
		srv:designate deploy service name like 'monevent'
		zipfilepath:designate source *.zip directory like '/home/test/console/temp/monevent.linux64.zip.21344'
		md5filepath:designate source *.md5 directory like '/home/test/console/temp/monevent.linux64.zip.md5.21344'
		cfg.XML_CONFIG: only for exec 'copyXml' indicate directory like '/home/test/app/update/monevent/config/'

	returns:
		0 indicate succ
		1 indicate failed
	"""
    try:
        back_flag = False
        ## check ZIP and MD5
        if not md5check(args[1], args[2]):
            print("FILE [" + zipfilepath + "] MD5 check error")
            return 1
        ## Backup
        if len(args) == 4:
            unzip_path = join_path(cfg.DEPLOY_PATH_UPDATE, args[0])
            dest_path = join_path(unzip_path, args[3])
        elif len(args) == 3:
            dest_path = join_path(cfg.DEPLOY_PATH_UPDATE, args[0])
        if os.path.isdir(dest_path):
            back_path = dest_path + '_bak.' + cfg.PID
            if not os.path.exists(back_path):
                shutil.move(dest_path, back_path)
                back_flag = True
        ## Unzip
        if len(args) == 4:
            print("unzip:" + args[1] + "to" + unzip_path)
            make_dirs(unzip_path)
            unzip_file(args[1], unzip_path)
        elif len(args) == 3:
            print("unzip:" + args[1] + "to" + cfg.DEPLOY_PATH_UPDATE)
            unzip_file(args[1], cfg.DEPLOY_PATH_UPDATE)
    except Exception:  ###!!! opt no key
        ## Restore
        if back_flag:
            if os.path.isdir(dest_path):
                shutil.rmtree(dest_path)
            shutil.move(back_path, dest_path)
        print(traceback.format_exc())
        return 1
    else:
        ## Drop backup
        if back_flag:
            shutil.rmtree(back_path)
        return 0
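
md5check is referenced but not defined in this listing. A minimal sketch, assuming the .md5 file stores the expected hex digest as its first whitespace-separated token, might be:

    import hashlib

    def md5check(zipfilepath, md5filepath):
        """Sketch: compare the zip file's MD5 with the digest in the .md5 file."""
        md5 = hashlib.md5()
        with open(zipfilepath, 'rb') as f:
            for chunk in iter(lambda: f.read(65536), b''):
                md5.update(chunk)
        with open(md5filepath) as f:
            expected = f.read().split()[0].lower()  # assumed '<digest> <name>' format
        return md5.hexdigest() == expected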
Example #12
def copyDirs(dir_list, filepath_src, filepath_dst):
    """
	make directory dir_dst like /home/test/app/run/monevent1/log /home/test/app/run/monevent1/flow \
		/home/test/app/run/monevent1/config /home/test/app/run/monevent1/bin
	
	args:
		dir_list:['/home/test/app/update/monevent/log', '/home/test/app/update/monevent/flow', '/home/test/app/update/monevent/config', '/home/test/app/update/monevent/bin']
		filepath_src:/home/test/app/update/monevent
		filepath_dst:/home/test/app/run/monevent1
	"""
    for dir_path in dir_list:
        dir_dst = join_path(filepath_dst, dir_path[len(filepath_src):])
        print("mkdir %s" % (dir_dst))
        make_dirs(dir_dst)
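
A hypothetical usage mirroring the docstring's example paths:

    dir_list = ['/home/test/app/update/monevent/log',
                '/home/test/app/update/monevent/flow',
                '/home/test/app/update/monevent/config',
                '/home/test/app/update/monevent/bin']
    copyDirs(dir_list, '/home/test/app/update/monevent',
             '/home/test/app/run/monevent1')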
Example #13
def main(config):

    # init loaders and base
    loaders = ReIDLoaders(config)
    base = Base(config)

    # make directions
    make_dirs(base.output_path)

    # init logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    assert config.mode in ['train', 'test', 'visualize']
    if config.mode == 'train':  # train mode

        # automatically resume model from the latest one
        start_train_epoch = 0
        if config.auto_resume_training_from_lastest_steps:
            print('resume', base.output_path)
            start_train_epoch = base.resume_last_model()

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs + 1):
            # save model
            base.save_model(current_epoch)
            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

        # test
        base.save_model(config.total_train_epochs)
        mAP, CMC = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))

    elif config.mode == 'test':  # test mode
        base.resume_from_model(config.resume_test_model)
        mAP, CMC = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {} with len {}'.
               format(time_now(), config.test_dataset, mAP, CMC, len(CMC)))

    elif config.mode == 'visualize':  # visualization mode
        base.resume_from_model(config.resume_visualize_model)
        visualize(config, base, loaders)
Example #14
    def _check_usergroup_(self):
        tmp_path = os.path.join(self.path, 'driveinfo.tmp')
        tools.make_dirs(tmp_path)
        if not os.path.isdir(tmp_path):
            return False

        file_path = os.path.join(tmp_path, 'file')
        os.system("echo abc > \"%s\"" % file_path)
        if not os.path.isfile(file_path):
            return False

        ret_val = False

        uid = os.getuid()
        gid = os.getgid()

        try:
            info = os.stat(file_path)
            if info.st_uid == uid and info.st_gid == gid:
                ret_val = True
        except OSError:
            pass

        if ret_val and uid == 0:
            #try to change the group
            import grp

            #search for another group
            new_gid = gid
            new_name = ''
            for group in grp.getgrall():
                if group.gr_gid != gid:
                    new_gid = group.gr_gid
                    new_name = group.gr_name
                    break

            if new_gid != gid:
                os.system("chgrp %s \"%s\"" % (new_name, file_path))
                try:
                    info = os.stat(file_path)
                    if info.st_gid != new_gid:
                        ret_val = False
                except OSError:
                    ret_val = False

        os.system("rm -rf \"%s\"" % tmp_path)
        return ret_val
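
Shelling out to chgrp can be avoided. The sketch below performs the same group-change test with os.chown, where a uid of -1 leaves the owner untouched; it is an alternative, not the project's code:

    import os

    def change_group(file_path, new_gid):
        """Sketch: change only the group of file_path and report success."""
        try:
            os.chown(file_path, -1, new_gid)  # uid -1 keeps the current owner
            return os.stat(file_path).st_gid == new_gid
        except OSError:
            return False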
Example #15
    def _check_usergroup_( self ):
        tmp_path = os.path.join( self.path, 'driveinfo.tmp' )
        tools.make_dirs( tmp_path )
        if not os.path.isdir( tmp_path ):
            return False

        file_path = os.path.join( tmp_path, 'file' )
        os.system( "echo abc > \"%s\"" % file_path )
        if not os.path.isfile( file_path ):
            return False

        ret_val = False

        uid = os.getuid()
        gid = os.getgid()

        try:
            info = os.stat( file_path )
            if info.st_uid == uid and info.st_gid == gid:
                ret_val = True
        except OSError:
            pass

        if ret_val and uid == 0:
            #try to change the group
            import grp

            #search for another group
            new_gid = gid
            new_name = ''
            for group in grp.getgrall():
                if group.gr_gid != gid:
                    new_gid = group.gr_gid
                    new_name = group.gr_name
                    break

            if new_gid != gid:
                os.system( "chgrp %s \"%s\"" % ( new_name, file_path ) )
                try:
                    info = os.stat( file_path )
                    if info.st_gid != new_gid:
                        ret_val = False
                except OSError:
                    ret_val = False

        os.system( "rm -rf \"%s\"" % tmp_path )
        return ret_val
Example #16
	def set_snapshots_path( self, value ):
		print "Snapshots path: %s" % value

		if len( value ) > 0:
			if not os.path.isdir( value ):
				return _( '%s is not a directory!' ) % value
			else:
				old_value = self.get_snapshots_path()
				self.set_str_value( 'snapshots.path', value )
				full_path = self.get_snapshots_full_path()
				self.set_str_value( 'snapshots.path', old_value )

				if not os.path.isdir( full_path ):
					tools.make_dirs( full_path )
					if not os.path.isdir( full_path ):
						return _( 'Can\'t write to: %s\nAre you sure you have write access?' ) % value

		self.set_str_value( 'snapshots.path', value )
		return None
Example #17
    def set_snapshots_path(self, value):
        print "Snapshots path: %s" % value

        if len(value) > 0:
            if not os.path.isdir(value):
                return _('%s is not a directory!') % value
            else:
                old_value = self.get_snapshots_path()
                self.set_str_value('snapshots.path', value)
                full_path = self.get_snapshots_full_path()
                self.set_str_value('snapshots.path', old_value)

                if not os.path.isdir(full_path):
                    tools.make_dirs(full_path)
                    if not os.path.isdir(full_path):
                        return _(
                            'Can\'t write to: %s\nAre you sure you have write access?'
                        ) % value

        self.set_str_value('snapshots.path', value)
        return None
Example #18
def main(config):
    loader = Loader(config)
    base = Base(config, loader)
    make_dirs(base.output_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_model_path)
    logger = Logger(os.path.join(base.save_logs_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:
            start_train_epoch = 0

        if config.auto_resume_training_from_lastest_step:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                base.resume_model(indexes[-1])
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        for current_epoch in range(start_train_epoch,
                                   config.total_train_epoch):
            base.save_model(current_epoch)

            if current_epoch < config.use_graph:
                _, result = train_meta_learning(base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(
                    time_now(), current_epoch, result))
                if current_epoch + 1 >= 1 and (current_epoch + 1) % 40 == 0:
                    mAP, CMC = test(config, base, loader)
                    logger(
                        'Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                        .format(time_now(), config.target_dataset, mAP, CMC))
            else:
                _, result = train_with_graph(config, base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(
                    time_now(), current_epoch, result))
                if current_epoch + 1 >= 1 and (current_epoch + 1) % 5 == 0:
                    mAP, CMC = test_with_graph(config, base, loader)
                    logger(
                        'Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                        .format(time_now(), config.target_dataset, mAP, CMC))

    elif config.mode == 'test':
        base.resume_model(config.resume_test_model)
        mAP, CMC = test_with_graph(config, base, loader)
        logger('Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'.
               format(time_now(), config.target_dataset, mAP, CMC))
Example #19
    def _check_perms_(self):
        tmp_path = os.path.join(self.path, 'driveinfo.tmp')
        tools.make_dirs(tmp_path)
        if not os.path.isdir(tmp_path):
            return False

        file_path = os.path.join(tmp_path, 'file')
        os.system("echo abc > \"%s\"" % file_path)
        if not os.path.isfile(file_path):
            return False

        ret_val = False

        if self._check_perms_for_file_(file_path, '111'):
            if self._check_perms_for_file_(file_path, '700'):
                if self._check_perms_for_file_(file_path, '600'):
                    if self._check_perms_for_file_(file_path, '711'):
                        if self._check_perms_for_file_(file_path, '300'):
                            if self._check_perms_for_file_(file_path, '666'):
                                ret_val = True

        os.system("rm -rf \"%s\"" % tmp_path)
        return ret_val
Example #20
    def _check_perms_( self ):
        tmp_path = os.path.join( self.path, 'driveinfo.tmp' )
        tools.make_dirs( tmp_path )
        if not os.path.isdir( tmp_path ):
            return False

        file_path = os.path.join( tmp_path, 'file' )
        os.system( "echo abc > \"%s\"" % file_path )
        if not os.path.isfile( file_path ):
            return False

        ret_val = False

        if self._check_perms_for_file_( file_path, '111' ):
            if self._check_perms_for_file_( file_path, '700' ):
                if self._check_perms_for_file_( file_path, '600' ):
                    if self._check_perms_for_file_( file_path, '711' ):
                        if self._check_perms_for_file_( file_path, '300' ):
                            if self._check_perms_for_file_( file_path, '666' ):
                                ret_val = True

        os.system( "rm -rf \"%s\"" % tmp_path )
        return ret_val
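
The _check_perms_for_file_ helper these methods rely on is not shown. A plausible sketch, assuming it applies the octal mode string and then verifies the result via stat, is:

    import os
    import stat

    def _check_perms_for_file_(file_path, mode):
        """Sketch: chmod file_path to the octal string mode and verify it stuck."""
        try:
            os.chmod(file_path, int(mode, 8))
            return stat.S_IMODE(os.stat(file_path).st_mode) == int(mode, 8)
        except (OSError, ValueError):
            return False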
Example #21
def task_receive_files(received_json, experiment, setting):
    file_url = received_json['data']['file_url']
    file_name = received_json['data']['file_name']

    if not os.path.exists(setting.storage_path):
        tools.make_dirs(setting.storage_path)
    file = os.path.join(setting.storage_path, file_name)
    experiment.set_experiment_file_name(file)

    download_url = "http://" + setting.web_server_ip + ":" + str(
        setting.web_server_port) + file_url
    file_data = urllib.request.urlopen(download_url).read()

    print('downloading file from ' + download_url)
    with open(file, "wb") as output:
        output.write(file_data)
    print('download file', file)

    file_md5 = tools.call_md5(file_data)
    ack_data = json.dumps({"file_name": file_name, "check": file_md5})

    print('task_receive_files ack_data:', ack_data)
    return ack_data
Example #22
    def test_make_dirs_not_writeable(self):
        with TemporaryDirectory() as d:
            os.chmod(d, stat.S_IRUSR)
            path = os.path.join(d, 'foobar{}'.format(random.randrange(100, 999)))
            self.assertFalse(tools.make_dirs(path))
Example #23
    def test_make_dirs(self):
        self.assertFalse(tools.make_dirs('/'))
        self.assertTrue(tools.make_dirs(os.getcwd()))
        with TemporaryDirectory() as d:
            path = os.path.join(d, 'foo', 'bar')
            self.assertTrue(tools.make_dirs(path))
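
Taken together, these tests pin down the contract of tools.make_dirs: it returns a boolean instead of raising, succeeds for directories that already exist and are writable, and fails for '/' and for unwritable parents. A minimal sketch consistent with that behavior (not necessarily the project's implementation):

    import os

    def make_dirs(path):
        """Sketch: create path (and parents) if needed; True only if the
        result is an existing, writable directory."""
        path = path.rstrip(os.sep)
        if not path:
            return False  # '' and '/' both normalize to an empty string
        try:
            os.makedirs(path, exist_ok=True)
        except OSError:
            pass  # fall through; the checks below decide the result
        return os.path.isdir(path) and os.access(path, os.W_OK)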
Example #24
def main(config):

    # loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make dirs
    make_dirs(config.save_images_path)
    make_dirs(config.save_models_path)
    make_dirs(config.save_features_path)

    # logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':

        # automatically resume model from the latest one
        start_train_epoch = 0
        root, _, files = os_walk(config.save_models_path)
        if len(files) > 0:
            # get indexes of saved models
            indexes = []
            for file in files:
                indexes.append(int(file.replace('.pkl', '').split('_')[-1]))

            # remove the bad-case and get available indexes
            model_num = len(base.model_list)
            available_indexes = copy.deepcopy(indexes)
            for element in indexes:
                if indexes.count(element) < model_num:
                    available_indexes.remove(element)

            available_indexes = sorted(list(set(available_indexes)),
                                       reverse=True)
            unavailable_indexes = list(
                set(indexes).difference(set(available_indexes)))

            if len(available_indexes
                   ) > 0:  # resume model from the latest model
                base.resume_model(available_indexes[0])
                start_train_epoch = available_indexes[0] + 1
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), available_indexes[0]))
            else:
                logger('Time: {}, there are no available models'.format(time_now()))

        # main loop
        for current_epoch in range(
                start_train_epoch, config.warmup_reid_epoches +
                config.warmup_gan_epoches + config.train_epoches):

            # test
            if current_epoch % 10 == 0 and current_epoch > config.warmup_reid_epoches + config.warmup_gan_epoches:
                results = test(config, base, loaders, brief=True)
                for key in results.keys():
                    logger('Time: {}\n Setting: {}\n {}'.format(
                        time_now(), key, results[key]))

            # visualize generated images
            if current_epoch % 10 == 0 or current_epoch <= 10:
                visualize(config, loaders, base, current_epoch)

            # train
            if current_epoch < config.warmup_reid_epoches:  # warmup reid model
                results = train_an_epoch(config,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=True,
                                         train_pixel=False,
                                         optimize_sl_enc=True)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:  # warmup GAN model
                results = train_an_epoch(config,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=False,
                                         train_pixel=False,
                                         optimize_sl_enc=False)
            else:  # joint train
                results = train_an_epoch(config,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=True,
                                         train_pixel=True,
                                         optimize_sl_enc=True)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

            # save model
            base.save_model(current_epoch)

        # test
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(
                time_now(), key, results[key]))

    elif config.mode == 'test':
        # resume from pre-trained model and test
        base.resume_model_from_path(config.pretrained_model_path,
                                    config.pretrained_model_epoch)
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(
                time_now(), key, results[key]))
Example #25
	def _take_snapshot( self, snapshot_id, now, include_folders, ignore_folders, dict, force ):
		#print "Snapshot: %s" % snapshot_id
		#print "\tInclude: %s" % include_folders
		#print "\tIgnore: %s" % ignore_folders
		#print "\tDict: %s" % dict

		self.set_take_snapshot_message( 0, _('...') )

		#snapshot_path = self.get_snapshot_path( snapshot_id )
		#snapshot_path_to = self.get_snapshot_path_to( snapshot_id )

		new_snapshot_id = 'new_snapshot'
		new_snapshot_path = self.get_snapshot_path( new_snapshot_id )

		if os.path.exists( new_snapshot_path ):
			self._execute( "rm -rf \"%s\"" % new_snapshot_path )
		
			if os.path.exists( new_snapshot_path ):
				logger.error( "Can't remove directory: %s" % new_snapshot_path )
				self.set_take_snapshot_message( 1, _('Can\'t remove directory: %s') % new_snapshot_path )
				os.system( 'sleep 2' ) #max 1 backup / second
				return False

		new_snapshot_path_to = self.get_snapshot_path_to( new_snapshot_id )

		#create exclude patterns string
		items = []
		for exclude in self.config.get_exclude_patterns():
			self._append_item_to_list( "--exclude=\"%s\"" % exclude, items )
		for folder in ignore_folders:
			self._append_item_to_list( "--exclude=\"%s\"" % folder, items )
		rsync_exclude = ' '.join( items )

		#create include patterns list
		items = []
		items2 = []
		for include_folder in include_folders:
			self._append_item_to_list( "--include=\"%s/**\"" % include_folder, items2 )
			while True:
				self._append_item_to_list( "--include=\"%s/\"" % include_folder, items )
				include_folder = os.path.split( include_folder )[0]
				if len( include_folder ) <= 1:
					break
		rsync_include = ' '.join( items )
		rsync_include2 = ' '.join( items2 )

		#rsync prefix & suffix
		rsync_prefix = 'rsync -a '
		rsync_suffix = ' --whole-file --delete ' + rsync_include + ' ' + rsync_exclude + ' ' + rsync_include2 + ' --exclude=\"*\" / '

		#update dict
		if not force:
			for folder in include_folders:
				dict[ folder ] = now

			self._set_last_snapshot_info( dict )

		#check previous backup
		snapshots = self.get_snapshots_list()
		prev_snapshot_id = ''

		#rsync_link_with = ''

		if len( snapshots ) > 0:
			prev_snapshot_id = snapshots[0]
			prev_snapshot_name = self.get_snapshot_display_id( prev_snapshot_id )
			self.set_take_snapshot_message( 0, _('Compare with snapshot %s') % prev_snapshot_name )
			logger.info( "Compare with old snapshot: %s" % prev_snapshot_id )
			
			prev_snapshot_folder = self.get_snapshot_path_to( prev_snapshot_id )
			cmd = rsync_prefix + ' -i --dry-run ' + rsync_suffix + '"' + prev_snapshot_folder + '"'
			try_cmd = self._execute_output( cmd, self._exec_rsync_compare_callback, prev_snapshot_name )
			changed = False

			for line in try_cmd.split( '\n' ):
				if len( line ) < 1:
					continue

				if line[0] != '.':
					changed = True
					break

			if not changed:
				logger.info( "Nothing changed, no back needed" )
				return False

			#create hard links
			
			#rsync_link_with = "--link-dest=\"%s\" " % prev_snapshot_folder

			if not self._create_directory( new_snapshot_path_to ):
				return False
			
			self.set_take_snapshot_message( 0, _('Create hard-links') )
			logger.info( "Create hard-links" )
			
			if force or len( ignore_folders ) == 0:
				cmd = "cp -al \"%s\"* \"%s\"" % ( self.get_snapshot_path_to( prev_snapshot_id ), new_snapshot_path_to )
				self._execute( cmd )
			else:
				for folder in include_folders:
					prev_path = self.get_snapshot_path_to( prev_snapshot_id, folder )
					new_path = self.get_snapshot_path_to( new_snapshot_id, folder )
					tools.make_dirs( new_path )
					cmd = "cp -alb \"%s\"* \"%s\"" % ( prev_path, new_path )
					self._execute( cmd )
			
			cmd = "chmod -R a+w \"%s\"" % new_snapshot_path
			self._execute( cmd )
		else:
			if not self._create_directory( new_snapshot_path_to ):
				return False

		#create new backup folder
		#if not self._create_directory( new_snapshot_path_to ):
		#	logger.error( "Can't create snapshot directory: %s" % new_snapshot_path_to )
		#	return False

		#sync changed folders
		logger.info( "Call rsync to take the snapshot" )
		#cmd = rsync_prefix + ' -v --delete-excluded --chmod=a-w ' + rsync_link_with + rsync_suffix + '"' + new_snapshot_path_to + '"'
		cmd = rsync_prefix + ' -v --delete-excluded ' + rsync_suffix + '"' + new_snapshot_path_to + '"'
		self.set_take_snapshot_message( 0, _('Take snapshot') )
		self._execute( cmd, self._exec_rsync_callback )

		#copy ignored directories
		if not force and len( prev_snapshot_id ) > 0 and len( ignore_folders ) > 0:
			for folder in ignore_folders:
				prev_path = self.get_snapshot_path_to( prev_snapshot_id, folder )
				new_path = self.get_snapshot_path_to( new_snapshot_id, folder )
				tools.make_dirs( new_path )
				cmd = "cp -alb \"%s/\"* \"%s\"" % ( prev_path, new_path )
				self._execute( cmd )

		#rename snapshot
		snapshot_path = self.get_snapshot_path( snapshot_id )
		os.system( "mv \"%s\" \"%s\"" % ( new_snapshot_path, snapshot_path ) )
		if not os.path.exists( snapshot_path ):
			logger.error( "Can't rename %s to %s" % ( new_snapshot_path, snapshot_path ) )
			self.set_take_snapshot_message( 1, _('Can\'t rename %s to %s') % ( new_snapshot_path, snapshot_path ) )
			os.system( 'sleep 2' ) #max 1 backup / second
			return False

		#make new folder read-only
		self._execute( "chmod -R a-w \"%s\"" % snapshot_path )
		return True
Example #26
def main(config):

    # loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make dirs
    make_dirs(config.save_images_path)
    make_dirs(config.save_wp_models_path)
    make_dirs(config.save_st_models_path)
    make_dirs(config.save_features_path)

    logger = setup_logger('adaptation_reid', config.output_path, if_train=True)

    if config.mode == 'train':

        if config.resume:
            # automatically resume model from the latest one
            if config.resume_epoch_num == 0:
                start_train_epoch = 0
                root, _, files = os_walk(config.save_models_path)
                if len(files) > 0:
                    # get indexes of saved models
                    indexes = []
                    for file in files:
                        indexes.append(
                            int(file.replace('.pkl', '').split('_')[-1]))

                    # remove the bad-case and get available indexes
                    model_num = len(base.model_list)
                    available_indexes = copy.deepcopy(indexes)
                    for element in indexes:
                        if indexes.count(element) < model_num:
                            available_indexes.remove(element)

                    available_indexes = sorted(list(set(available_indexes)),
                                               reverse=True)
                    unavailable_indexes = list(
                        set(indexes).difference(set(available_indexes)))

                    if len(available_indexes
                           ) > 0:  # resume model from the latest model
                        base.resume_model(available_indexes[0])
                        start_train_epoch = available_indexes[0] + 1
                        logger.info(
                            'Time: {}, automatically resume training from the latest step (model {})'
                            .format(time_now(), available_indexes[0]))
                    else:
                        logger.info('Time: {}, there are no available models'.format(time_now()))
            else:
                start_train_epoch = config.resume_epoch_num
        else:
            start_train_epoch = 0

        # main loop
        for current_epoch in range(
                start_train_epoch, config.warmup_reid_epoches +
                config.warmup_gan_epoches + config.warmup_adaptation_epoches):

            # train
            if current_epoch < config.warmup_reid_epoches:  # warmup reid model
                results = train_an_epoch(config,
                                         0,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=True,
                                         self_training=False,
                                         optimize_sl_enc=True,
                                         train_adaptation=False)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:  # warmup GAN model
                results = train_an_epoch(config,
                                         0,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=False,
                                         self_training=False,
                                         optimize_sl_enc=False,
                                         train_adaptation=False)  # joint train
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches + config.warmup_adaptation_epoches:  #warmup adaptation
                results = train_an_epoch(config,
                                         0,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=False,
                                         self_training=False,
                                         optimize_sl_enc=False,
                                         train_adaptation=True)

            print("another epoch")
            logger.info('Time: {};  Epoch: {};  {}'.format(
                time_now(), current_epoch, results))
            # save model
            if current_epoch % config.save_model_interval == 0:
                base.save_model(current_epoch, True)

            if current_epoch % config.test_model_interval == 0:
                visualize(config, loaders, base, current_epoch)
                test(config, base, loaders, epoch=0, brief=False)

        total_wp_epoches = config.warmup_reid_epoches + config.warmup_gan_epoches

        for iter_n in range(config.iteration_number):
            src_dataset, src_dataloader, trg_dataset, trg_dataloader = loaders.get_self_train_loaders(
            )

            trg_labeled_dataloader = generate_labeled_dataset(
                base, iter_n, src_dataset, src_dataloader, trg_dataset,
                trg_dataloader)
            for epoch in range(total_wp_epoches + 1, config.self_train_epoch):
                results = train_an_epoch(
                    config,
                    iter_n,
                    loaders,
                    base,
                    epoch,
                    train_gan=True,
                    train_reid=False,
                    self_training=True,
                    optimize_sl_enc=True,
                    trg_labeled_loader=trg_labeled_dataloader)
                logger.info('Time: {};  Epoch: {};  {}'.format(
                    time_now(), epoch, results))

                if epoch % config.save_model_interval == 0:
                    base.save_model(iter_n * config.self_train_epoch + epoch,
                                    False)

    elif config.mode == 'test':
        # resume from pre-trained model and test
        base.resume_model_from_path(config.pretrained_model_path,
                                    config.pretrained_model_epoch)
        cmc, mean_ap = test(config, base, loaders, epoch=100, brief=False)  # avoid shadowing built-in map
Example #27
def main(config):

    # init loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make directions
    make_dirs(base.output_path)
    make_dirs(base.save_model_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_visualize_market_path)
    make_dirs(base.save_visualize_duke_path)

    # init logger
    logger = Logger(os.path.join(config.output_path, 'logs', 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode

        # resume model from the resume_train_epoch
        start_train_epoch = 0

        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # get indexes of saved models
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                # resume model from the latest model
                base.resume_model(indexes[-1])
                #
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):
            # save model
            base.save_model(current_epoch)
            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders, current_epoch)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))
        # test
        testwithVer2(config,
                     logger,
                     base,
                     loaders,
                     'duke',
                     use_gcn=True,
                     use_gm=True)

    elif config.mode == 'test':  # test mode
        # resume from the resume_test_epoch
        if config.resume_test_path != '' and config.resume_test_epoch != 0:
            base.resume_model_from_path(config.resume_test_path,
                                        config.resume_test_epoch)
        else:
            assert 0, 'please set resume_test_path and resume_test_epoch '
        # test
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=False,
                                           use_gm=False)
        logger('Time: {},  base, Dataset: Duke  \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=True,
                                           use_gm=False)
        logger(
            'Time: {},  base+gcn, Dataset: Duke  \nmAP: {} \nRank: {}'.format(
                time_now(), duke_map, duke_rank))
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=True,
                                           use_gm=True)
        logger('Time: {},  base+gcn+gm, Dataset: Duke  \nmAP: {} \nRank: {}'.
               format(time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # resume from the resume_visualize_epoch
        if config.resume_visualize_path != '' and config.resume_visualize_epoch != 0:
            base.resume_model_from_path(config.resume_visualize_path,
                                        config.resume_visualize_epoch)
            print('Time: {}, resume model from {} {}'.format(
                time_now(), config.resume_visualize_path,
                config.resume_visualize_epoch))
        # visualization
        if 'market' in config.train_dataset:
            visualize_ranked_images(config, base, loaders, 'market')
        elif 'duke' in config.train_dataset:
            visualize_ranked_images(config, base, loaders, 'duke')
        else:
            assert 0
Example #28
    def _take_snapshot(self, snapshot_id, now, include_folders, ignore_folders,
                       dict, force):
        #print "Snapshot: %s" % snapshot_id
        #print "\tInclude: %s" % include_folders
        #print "\tIgnore: %s" % ignore_folders
        #print "\tDict: %s" % dict

        self.set_take_snapshot_message(0, _('...'))

        #snapshot_path = self.get_snapshot_path( snapshot_id )
        #snapshot_path_to = self.get_snapshot_path_to( snapshot_id )

        new_snapshot_id = 'new_snapshot'
        new_snapshot_path = self.get_snapshot_path(new_snapshot_id)

        if os.path.exists(new_snapshot_path):
            self._execute("rm -rf \"%s\"" % new_snapshot_path)

            if os.path.exists(new_snapshot_path):
                logger.error("Can't remove directory: %s" % new_snapshot_path)
                self.set_take_snapshot_message(
                    1,
                    _('Can\'t remove directory: %s') % new_snapshot_path)
                os.system('sleep 2')  #max 1 backup / second
                return False

        new_snapshot_path_to = self.get_snapshot_path_to(new_snapshot_id)

        #create exclude patterns string
        items = []
        for exclude in self.config.get_exclude_patterns():
            self._append_item_to_list("--exclude=\"%s\"" % exclude, items)
        for folder in ignore_folders:
            self._append_item_to_list("--exclude=\"%s\"" % folder, items)
        rsync_exclude = ' '.join(items)

        #create include patterns list
        items = []
        items2 = []
        for include_folder in include_folders:
            self._append_item_to_list("--include=\"%s/**\"" % include_folder,
                                      items2)
            while True:
                self._append_item_to_list("--include=\"%s/\"" % include_folder,
                                          items)
                include_folder = os.path.split(include_folder)[0]
                if len(include_folder) <= 1:
                    break
        rsync_include = ' '.join(items)
        rsync_include2 = ' '.join(items2)

        #rsync prefix & suffix
        rsync_prefix = 'rsync -a '
        rsync_suffix = ' --whole-file --delete ' + rsync_include + ' ' + rsync_exclude + ' ' + rsync_include2 + ' --exclude=\"*\" / '

        #update dict
        if not force:
            for folder in include_folders:
                dict[folder] = now

            self._set_last_snapshot_info(dict)

        #check previous backup
        snapshots = self.get_snapshots_list()
        prev_snapshot_id = ''

        #rsync_link_with = ''

        if len(snapshots) > 0:
            prev_snapshot_id = snapshots[0]
            prev_snapshot_name = self.get_snapshot_display_id(prev_snapshot_id)
            self.set_take_snapshot_message(
                0,
                _('Compare with snapshot %s') % prev_snapshot_name)
            logger.info("Compare with old snapshot: %s" % prev_snapshot_id)

            prev_snapshot_folder = self.get_snapshot_path_to(prev_snapshot_id)
            cmd = rsync_prefix + ' -i --dry-run ' + rsync_suffix + '"' + prev_snapshot_folder + '"'
            try_cmd = self._execute_output(cmd,
                                           self._exec_rsync_compare_callback,
                                           prev_snapshot_name)
            changed = False

            for line in try_cmd.split('\n'):
                if len(line) < 1:
                    continue

                if line[0] != '.':
                    changed = True
                    break

            if not changed:
                logger.info("Nothing changed, no back needed")
                return False

            #create hard links

            #rsync_link_with = "--link-dest=\"%s\" " % prev_snapshot_folder

            if not self._create_directory(new_snapshot_path_to):
                return False

            self.set_take_snapshot_message(0, _('Create hard-links'))
            logger.info("Create hard-links")

            if force or len(ignore_folders) == 0:
                cmd = "cp -al \"%s\"* \"%s\"" % (self.get_snapshot_path_to(
                    prev_snapshot_id), new_snapshot_path_to)
                self._execute(cmd)
            else:
                for folder in include_folders:
                    prev_path = self.get_snapshot_path_to(
                        prev_snapshot_id, folder)
                    new_path = self.get_snapshot_path_to(
                        new_snapshot_id, folder)
                    tools.make_dirs(new_path)
                    cmd = "cp -alb \"%s\"* \"%s\"" % (prev_path, new_path)
                    self._execute(cmd)

            cmd = "chmod -R a+w \"%s\"" % new_snapshot_path
            self._execute(cmd)
        else:
            if not self._create_directory(new_snapshot_path_to):
                return False

        #create new backup folder
        #if not self._create_directory( new_snapshot_path_to ):
        #	logger.error( "Can't create snapshot directory: %s" % new_snapshot_path_to )
        #	return False

        #sync changed folders
        logger.info("Call rsync to take the snapshot")
        #cmd = rsync_prefix + ' -v --delete-excluded --chmod=a-w ' + rsync_link_with + rsync_suffix + '"' + new_snapshot_path_to + '"'
        cmd = rsync_prefix + ' -v --delete-excluded ' + rsync_suffix + '"' + new_snapshot_path_to + '"'
        self.set_take_snapshot_message(0, _('Take snapshot'))
        self._execute(cmd, self._exec_rsync_callback)

        #copy ignored directories
        if not force and len(prev_snapshot_id) > 0 and len(ignore_folders) > 0:
            for folder in ignore_folders:
                prev_path = self.get_snapshot_path_to(prev_snapshot_id, folder)
                new_path = self.get_snapshot_path_to(new_snapshot_id, folder)
                tools.make_dirs(new_path)
                cmd = "cp -alb \"%s/\"* \"%s\"" % (prev_path, new_path)
                self._execute(cmd)

        #rename snapshot
        snapshot_path = self.get_snapshot_path(snapshot_id)
        os.system("mv \"%s\" \"%s\"" % (new_snapshot_path, snapshot_path))
        if not os.path.exists(snapshot_path):
            logger.error("Can't rename %s to %s" %
                         (new_snapshot_path, snapshot_path))
            self.set_take_snapshot_message(
                1,
                _('Can\'t rename %s to %s') %
                (new_snapshot_path, snapshot_path))
            os.system('sleep 2')  #max 1 backup / second
            return False

        #make new folder read-only
        self._execute("chmod -R a-w \"%s\"" % snapshot_path)
        return True
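
The commented-out rsync_link_with lines in both versions of _take_snapshot hint at an alternative to the explicit cp -al hard-link step: rsync can hard-link unchanged files against the previous snapshot itself via --link-dest. A sketch of how that command could be assembled from the variables defined above (an assumption, not the shipped code):

    def build_rsync_snapshot_cmd(rsync_prefix, rsync_suffix,
                                 prev_snapshot_folder, new_snapshot_path_to):
        """Sketch: one rsync call that hard-links unchanged files against the
        previous snapshot, replacing the separate cp -al step."""
        link_dest = '--link-dest="%s" ' % prev_snapshot_folder
        return (rsync_prefix + ' -v --delete-excluded ' + link_dest
                + rsync_suffix + '"' + new_snapshot_path_to + '"')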
Example #29
    def train_unet_patch(self):
        batch_size = 12
        test_batch_size = 12
        epochs = 5
        if self.data in ['ips', 'melanoma']:
            for data_num in [1]:  # dataset index
                clear_session()  # reset the Keras session state

                # use multiple GPUs with Keras via memory growth (optional)
                physical_devices = tf.config.experimental.list_physical_devices('GPU')
                if len(physical_devices) > 0:
                    for k in range(len(physical_devices)):
                        tf.config.experimental.set_memory_growth(physical_devices[k], True)
                        print('memory growth:', tf.config.experimental.get_memory_growth(physical_devices[k]))
                else:
                    print("Not enough GPU hardware devices available")


                save_path = tl.get_save_path(self.data, self.method, self.model_name,
                                             self.patch_size, data_num)
                # load model weights depending on the stage
                if self.stage == "stage1":
                    model = self.load_model()

                elif self.stage == "stage2":
                    save_path2 = save_path
                    save_path = save_path+"stage2/"
                    model = self.load_model()
                    model.load_weights(save_path2+'weights/weights.h5')

                elif self.stage == "stage3":
                    save_path2 = save_path+"stage2/"
                    save_path = save_path+"stage2/stage3/"
                    model = self.load_model()
                    model.load_weights(save_path2+'weights/weights.h5')

                elif self.stage == "stage4":
                    save_path2 = save_path+"stage2/stage3/"
                    save_path = save_path+"stage2/stage3/stage4/"
                    model = self.load_model()
                    model.load_weights(save_path2+'weights/weights.h5')

                #model.summary() 

                tl.make_dirs(self.data, self.method, self.model_name,
                            self.patch_size, data_num)
                train="/home/sora/new_project/crop/dataset_%d/train/"%data_num
                val="/home/sora/new_project/crop/dataset_%d/val/"%data_num
                start_time = timeit.default_timer()
                # train -----------------------------------------------------------
                # configure the data loaders
                if self.data =="ips":
                    train_gen = newgenerator.ImageSequence(train,batch_size,"train",data_num)
                    valid_gen = newgenerator.ImageSequence(val, batch_size,"val",data_num)
                if self.data =="melanoma":
                    train_gen = newgenerator.ImageSequence_me(train,batch_size,"train",data_num)
                    valid_gen = newgenerator.ImageSequence_me(val, batch_size,"val",data_num)
                os.makedirs(save_path + 'weights', exist_ok=True)
                # save weights whenever val_loss improves
                model_checkpoint = ModelCheckpoint(
                    filepath=os.path.join(save_path,'weights', 'weights.h5'),
                    monitor="val_loss",
                    verbose=1,
                    save_best_only=True) 
                # store the training run in history
                history = model.fit_generator(generator=train_gen,
                    epochs=epochs,
                    steps_per_epoch=len(train_gen),
                    verbose=1,callbacks=[model_checkpoint],
                    validation_data=valid_gen,
                    validation_steps=len(valid_gen),
                    max_queue_size=5)

                train_time = timeit.default_timer() - start_time
                # save the training parameters
                tl.save_parameter(save_path, self.model_name, self.patch_size,
                                  lr, epochs,
                                  batch_size, train_time)
                # plot the training loss
                tl.draw_train_loss_plot(history, save_path)
                del train_gen, valid_gen, history, model_checkpoint, model
                gc.collect()
Example #30
    def test_make_dirs(self):
        self.assertFalse(tools.make_dirs('/'))
        self.assertTrue(tools.make_dirs(os.getcwd()))
        path = '/tmp/foobar{}'.format(random.randrange(100, 999))
        self.assertTrue(tools.make_dirs(path))
        os.rmdir(path)
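This test pins down the contract of tools.make_dirs: False for the bare root path, True for a directory that already exists, and True after creating a missing one. A minimal sketch of a helper with that contract follows; the error handling is an assumption for illustration, not backintime's actual implementation:

import os

def make_dirs(path):
    """Create path (and any missing parents); return True if it exists afterwards."""
    path = path.rstrip(os.sep)
    if not path:
        # the bare root ('/') strips to an empty string and is rejected
        return False
    if not os.path.isdir(path):
        try:
            os.makedirs(path)
        except OSError:
            # e.g. permission denied; the final check below reports the failure
            pass
    return os.path.isdir(path)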
Example #31
    def __init__(self):
        self._APP_PATH = os.path.abspath(os.path.dirname(__file__))
        self._DOC_PATH = '/usr/share/doc/backintime'
        if os.path.exists(os.path.join(self._APP_PATH, 'LICENSE')):
            self._DOC_PATH = self._APP_PATH

        self._GLOBAL_CONFIG_PATH = '/etc/backintime/config2'

        HOME_FOLDER = os.path.expanduser('~')
        self._LOCAL_DATA_FOLDER = os.path.join(
            os.getenv('XDG_DATA_HOME',
                      '$HOME/.local/share').replace('$HOME', HOME_FOLDER),
            'backintime')
        self._LOCAL_CONFIG_FOLDER = os.path.join(
            os.getenv('XDG_CONFIG_HOME',
                      '$HOME/.config').replace('$HOME', HOME_FOLDER),
            'backintime')

        #self._LOCAL_CONFIG_FOLDER = os.path.expanduser( '~/.config/backintime' )
        tools.make_dirs(self._LOCAL_CONFIG_FOLDER)
        tools.make_dirs(self._LOCAL_DATA_FOLDER)

        self._LOCAL_CONFIG_PATH = os.path.join(self._LOCAL_CONFIG_FOLDER,
                                               'config2')

        self.load(self._GLOBAL_CONFIG_PATH)
        self.append(self._LOCAL_CONFIG_PATH)

        OLD_CONFIG_PATH = os.path.join(self._LOCAL_CONFIG_FOLDER, 'config')
        if os.path.exists(OLD_CONFIG_PATH):
            # import the old config
            old_config = configfile.ConfigFile()
            old_config.load(OLD_CONFIG_PATH)

            key_map = {
                'BASE_BACKUP_PATH': 'snapshots.path',
                'INCLUDE_FOLDERS': 'snapshots.include_folders',
                'EXCLUDE_PATTERNS': 'snapshots.exclude_patterns',
                'AUTOMATIC_BACKUP': 'snapshots.automatic_backup_mode',
                'REMOVE_OLD_BACKUPS': 'snapshots.remove_old_snapshots.enabled',
                'REMOVE_OLD_BACKUPS_VALUE':
                'snapshots.remove_old_snapshots.value',
                'REMOVE_OLD_BACKUPS_UNIT':
                'snapshots.remove_old_snapshots.unit',
                'MIN_FREE_SPACE': 'snapshots.min_free_space.enabled',
                'MIN_FREE_SPACE_VALUE': 'snapshots.min_free_space.value',
                'MIN_FREE_SPACE_UNIT': 'snapshots.min_free_space.unit',
                'DONT_REMOVE_NAMED_SNAPSHOTS':
                'snapshots.dont_remove_named_snapshots',
                'DIFF_CMD': 'gnome.diff.cmd',
                'DIFF_CMD_PARAMS': 'gnome.diff.params',
                'LAST_PATH': 'gnome.last_path',
                'MAIN_WINDOW_X': 'gnome.main_window.x',
                'MAIN_WINDOW_Y': 'gnome.main_window.y',
                'MAIN_WINDOW_WIDTH': 'gnome.main_window.width',
                'MAIN_WINDOW_HEIGHT': 'gnome.main_window.height',
                'MAIN_WINDOW_HPANED1_POSITION': 'gnome.main_window.hpaned1',
                'MAIN_WINDOW_HPANED2_POSITION': 'gnome.main_window.hpaned2'
            }

            if self.get_if_dont_exists(key_map, old_config):
                self.save()

            os.system("rm \"%s\"" % OLD_CONFIG_PATH)
Example #32
def main(config):

    # init loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make directories
    make_dirs(base.output_path)
    make_dirs(base.save_model_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_visualize_market_path)
    make_dirs(base.save_visualize_duke_path)

    # init logger
    logger = Logger(
        os.path.join(config.output_path, 'logs', 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode

        # resume model from the resume_train_epoch
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:
            start_train_epoch = 0

        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # get indexes of saved models
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(set(indexes))
                # resume from the latest saved model
                base.resume_model(indexes[-1])
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):

            # save model
            base.save_model(current_epoch)

            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

            # test
            if (current_epoch + 1) % 40 == 0:  # evaluate every 40 epochs
                market_map, market_rank = test(config, base, loaders, 'market')
                duke_map, duke_rank = test(config, base, loaders, 'duke')
                logger(
                    'Time: {},  Dataset: Market  \nmAP: {} \nRank: {}'.format(
                        time_now(), market_map, market_rank))
                logger('Time: {},  Dataset: Duke  \nmAP: {} \nRank: {}'.format(
                    time_now(), duke_map, duke_rank))
                logger('')

    elif config.mode == 'test':  # test mode
        # resume from the resume_test_epoch
        if config.resume_test_epoch >= 0:
            base.resume_model(config.resume_test_epoch)
        # test
        market_map, market_rank = test(config, base, loaders, 'market')
        duke_map, duke_rank = test(config, base, loaders, 'duke')
        logger('Time: {},  Dataset: Market  \nmAP: {} \nRank: {}'.format(
            time_now(), market_map, market_rank))
        logger('Time: {},  Dataset: Duke  \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # resume from the resume_visualize_epoch
        if config.resume_visualize_epoch >= 0:
            base.resume_model(config.resume_visualize_epoch)
        # visualization
        visualize_ranking_list(config, base, loaders, 'market')
        visualize_ranking_list(config, base, loaders, 'duke')
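The block of make_dirs calls at the top of main() is idempotent directory creation: each call must succeed whether or not the folder already exists. On Python 3.2+ the standard library provides the same no-op-if-present behavior directly, so a minimal stand-in for any one of those calls (the path literal here is illustrative) would be:

import os

# creates the directory and any missing parents; no error if it already exists
os.makedirs('output/logs', exist_ok=True)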