Example #1
	def CommitMatch(self,db, doValidation=True):
		self.ParseSpringOutput()
		ladder = db.GetLadder(self.ladder_id )
		gameid = self.gameid
		if doValidation and not self.CheckValidSetup( db ):
			raise InvalidOptionSetup( gameid, self.ladder_id )
		session = db.session()
		match = Match()
		match.date = datetime.datetime.now()
		match.modname = ''
		replayzip = os.path.basename(self.replay).replace(".sdf",".zip")
		base = tasbot.config.Config().get('ladder','base_dir')
		replayzip = os.path.join( base, 'demos', replayzip)
		# make sure the demos directory exists before writing the zip into it
		mkdir_p(os.path.dirname(replayzip))
		with zipfile.ZipFile(replayzip, mode='w') as f_out:
			# store only the base name in the archive rather than the full path
			f_out.write(self.replay, arcname=os.path.basename(self.replay), compress_type=zipfile.ZIP_DEFLATED)
		match.replay = replayzip
		match.game_id = gameid
		match.ladder_id = ladder.id
		match.last_frame = self.game_over
		match.duration = datetime.timedelta( seconds=float(match.last_frame) / 30.0 )
		session.add( match )
		session.commit()
		#session.refresh()
		matchid = match.id
		match.mapname = ''
		for key,val in self.options.iteritems():
			s = MatchSetting()
			s.key = key
			s.value = val
			if key == "mapname":
				query = session.query( Map ).filter( Map.name == val )
				if query.count() == 0:
					import xmlrpcdl
					db_map = xmlrpcdl.DownloadQueue().add_map(val)
					session.add(db_map)
					session.commit()
				else:
					db_map = query.one()
				match.mapname = db_map.name
				session.add( match )
				session.commit()
			if key == "modname":
				match.modname = val
				session.add( match )
				session.commit()
			s.match_id = match.id
			session.add( s )
			#session.commit()
		for key,val in self.restrictions.iteritems():
			s = MatchSetting()
			s.key = key
			s.value = val
			s.match_id = match.id
			session.add( s )
			session.commit()
		self.CommitPlayerResults(session,match)
		# update rankings before closing the session, so `match` is still attached
		GlobalRankingAlgoSelector.GetInstance( ladder.ranking_algo_id ).Update( ladder.id, match, db )
		session.close()
		return matchid
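
(All six examples on this page exercise a mkdir_p helper whose implementation is not shown. Below is a minimal sketch consistent with the calls on this page, including the optional delete= flag that Examples #3, #4 and #6 pass; the real myutils version may differ.)

# Hedged sketch of the mkdir_p helper used throughout these examples.
import errno
import os
import shutil

def mkdir_p(path, delete=False):
    """Create path like `mkdir -p`; delete=True clears it first (inferred from usage)."""
    path = str(path)  # Examples #3/#4/#6 also pass pathlib.Path objects
    if delete and os.path.isdir(path):
        shutil.rmtree(path)
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:  # an existing directory is not an error
            raise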
Example #2
	def add_map(self,mapname):
		pattern = self._searchstring(mapname)
		result = self._proxy.springfiles.search(pattern)[0]
		name = result['name']
		nmap = Map()
		meta = result['metadata']
		nmap.startpos = [(f['x'],f['z']) for f in meta['StartPos'] ]
		nmap.name = name
		nmap.md5 = result['md5']
		try:
			imgurl = result['mapimages'][0]
			basedir = nmap.basedir(globe.config)
			basename = nmap.name + imgurl[-4:]
			local_fn = os.path.join( basedir, '1024', basename )
			globe.mkdir_p(os.path.dirname(local_fn))  # ensure the 1024/ directory exists
			urllib.urlretrieve(imgurl, local_fn)
			img = Image.open(local_fn)
			for i in [128,256,512]:
				resized_img = img.resize((i, i), Image.ANTIALIAS)
				fn = os.path.join(basedir, str(i), basename)
				myutils.mkdir_p(os.path.dirname(fn))  # ensure the <size>/ directory exists
				resized_img.save(fn)
			nmap.minimap = basename
		except Exception as e:
			globe.Log.exception(e)
			globe.Log.error('download for map %s failed' % nmap.name)
		# Example #1 assigns the return value to db_map and persists it,
		# so the new Map is returned even if the minimap download failed
		return nmap
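
(The snippet never shows how self._proxy is built; from the springfiles.search call it is presumably an XML-RPC client. A hypothetical construction follows; the endpoint URL and the pattern fields are assumptions, not taken from this code.)

# Hypothetical proxy setup for the springfiles.search call above; the endpoint
# URL and the search-pattern fields are assumptions.
import xmlrpclib  # Python 2; use xmlrpc.client on Python 3

proxy = xmlrpclib.ServerProxy('http://api.springfiles.com/xmlrpc.php')
pattern = {'springname': 'SmallDivide', 'category': 'map'}  # illustrative only
result = proxy.springfiles.search(pattern)[0]
print(result['name'], result['md5'])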
Example #3
    def save_state(self, conf, to_save_folder=False, model_only=False):
        if to_save_folder:
            save_path = conf.save_path
        else:
            save_path = conf.model_path

        # create the folder if missing; delete=False keeps any existing checkpoints
        myutils.mkdir_p(save_path, delete=False)
        
        torch.save(
            self.model.state_dict(),
            save_path /
            ('model_{}_step:{}.pth'.format(get_time(), self.step)))
        if not model_only:
            torch.save(
                self.optimizer.state_dict(),
                save_path /
                ('optimizer_{}_step:{}.pth'.format(get_time(), self.step)))
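
(Restoring one of these checkpoints is symmetric; a minimal sketch, assuming the same model/optimizer objects and a filename suffix produced by save_state above. The fixed_str parameter is a placeholder.)

# Minimal restore sketch; fixed_str stands for the '{time}_step:{step}.pth'
# suffix written by save_state above.
import torch

def load_state(model, optimizer, save_path, fixed_str):
    model.load_state_dict(torch.load(save_path / ('model_' + fixed_str)))
    if optimizer is not None:
        optimizer.load_state_dict(torch.load(save_path / ('optimizer_' + fixed_str)))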
Example #4
def get_config():

    parser = argparse.ArgumentParser('argument for training')

    parser.add_argument('--use_dataset',
                        type=str,
                        default='Cars',
                        choices=['CUB', 'Cars', 'SOP', 'Inshop'])
    # batch
    parser.add_argument('--batch_size', type=int, default=120)
    parser.add_argument('--instances', type=int, default=3)
    # optimization (0.0 is a sentinel meaning "use the per-dataset default set below")
    parser.add_argument('--lr', type=float, default=0.0)
    parser.add_argument('--lr_p', type=float, default=0.0)
    parser.add_argument('--lr_gamma', type=float, default=0.0)
    # model dataset
    parser.add_argument('--freeze_bn', type=int, default=1)
    # method
    parser.add_argument('--use_loss',
                        type=str,
                        default='triplet',
                        choices=['triplet', 'n-npair', 'semihtriplet', 'ms'])
    parser.add_argument('--sec_wei', type=float, default=0.0)
    parser.add_argument('--norm_momentum', type=float, default=1.0)
    parser.add_argument('--l2reg_wei', type=float, default=0.0)

    parser.add_argument('--test_sop_model', type=str, default='')

    conf = parser.parse_args()

    conf.num_devs = 1

    if conf.use_dataset == 'CUB':
        conf.lr = 1.0e-5 if conf.lr == 0 else conf.lr
        conf.lr_p = 0.5e-5 if conf.lr_p == 0 else conf.lr_p
        conf.weight_decay = 0.5 * 5e-3

        conf.start_step = 0
        conf.lr_gamma = 0.1 if conf.lr_gamma == 0 else conf.lr_gamma
        if conf.use_loss == 'ms':
            conf.step_milestones = [3000, 6000, 9000]
        else:
            conf.step_milestones = [5000, 9000, 9000]
        conf.steps = 8000

    elif conf.use_dataset == 'Cars':
        conf.lr = 1e-5 if conf.lr == 0 else conf.lr
        conf.lr_p = 1e-5 if conf.lr_p == 0 else conf.lr_p
        conf.weight_decay = 0.5 * 5e-3

        conf.start_step = 0
        # give lr_gamma a default like the other datasets; as written, Cars left
        # step_milestones unset whenever lr_gamma stayed at its 0.0 default
        conf.lr_gamma = 0.1 if conf.lr_gamma == 0 else conf.lr_gamma
        if conf.lr_gamma == 0.5:
            conf.step_milestones = [4000, 6000, 9000]
        else:
            conf.step_milestones = [2000, 9000, 9000]
        conf.steps = 8000

    elif conf.use_dataset == 'SOP':
        conf.lr = 2.5e-4 if conf.lr == 0 else conf.lr
        conf.lr_p = 0.5e-4 if conf.lr_p == 0 else conf.lr_p
        conf.weight_decay = 1e-5

        conf.start_step = 0
        conf.lr_gamma = 0.1 if conf.lr_gamma == 0 else conf.lr_gamma
        conf.step_milestones = [6e3, 18e3, 35e3]
        conf.steps = 12e3

    elif conf.use_dataset == 'Inshop':
        conf.lr = 5e-4 if conf.lr == 0 else conf.lr
        conf.lr_p = 1e-4 if conf.lr_p == 0 else conf.lr_p
        conf.weight_decay = 1e-5

        conf.start_step = 0
        conf.lr_gamma = 0.1 if conf.lr_gamma == 0 else conf.lr_gamma
        conf.step_milestones = [6e3, 18e3, 35e3]
        conf.steps = 12e3

    conf.device = torch.device(
        "cuda:0" if torch.cuda.is_available() else "cpu")

    now_time = datetime.datetime.now().strftime('%m%d_%H%M')
    conf_work_path = 'work_space/' + conf.use_dataset + '_' + now_time
    myutils.mkdir_p(conf_work_path, delete=True)
    myutils.set_file_logger(work_dir=conf_work_path, log_level=logging.DEBUG)
    sys.stdout = myutils.Logger(conf_work_path + '/log-prt')
    sys.stderr = myutils.Logger(conf_work_path + '/log-prt-err')

    path0, path1 = conf_work_path.split('/')
    conf.log_path = Path(path0) / 'logs' / path1 / 'log'
    conf.work_path = Path(conf_work_path)
    conf.model_path = conf.work_path / 'models'
    conf.save_path = conf.work_path / 'save'

    conf.start_eval = False

    conf.num_workers = 8

    conf.bninception_pretrained_model_path = './pretrained_models/bn_inception-52deb4733.pth'

    conf.transform_dict = {}
    conf.use_simple_aug = False

    conf.transform_dict['rand-crop'] = \
        transforms.Compose([
            transforms.Resize(size=(256, 256)) if conf.use_simple_aug else transforms.Resize(size=256),
            transforms.RandomCrop((227, 227)) if conf.use_simple_aug else transforms.RandomResizedCrop(
                                                                              scale=[0.16, 1],
                                                                              size=227
                                                                          ),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[123 / 255.0, 117 / 255.0, 104 / 255.0],
                                 std=[1.0 / 255, 1.0 / 255, 1.0 / 255]),
            transforms.Lambda(lambda x: x[[2, 1, 0], ...]) #to BGR
        ])
    conf.transform_dict['center-crop'] = \
        transforms.Compose([
            transforms.Resize(size=(256, 256)) if conf.use_simple_aug else transforms.Resize(size=256),
            transforms.CenterCrop(227),
            transforms.ToTensor(),
            transforms.Normalize(mean=[123 / 255.0, 117 / 255.0, 104 / 255.0],
                                 std=[1.0 / 255, 1.0 / 255, 1.0 / 255]),
            transforms.Lambda(lambda x: x[[2, 1, 0], ...]) #to BGR
        ])

    return conf
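
(A note on the Normalize values above: with std pinned at 1/255, the transform is equivalent to rescaling the [0, 1] tensor back to [0, 255] and subtracting per-channel means 123/117/104, i.e. the classic Caffe-style BN-Inception preprocessing; the final Lambda flips RGB to BGR for the same reason. A quick numeric check:)

# Verify that Normalize(mean=m/255, std=1/255) equals 255*x - m channel-wise.
import torch

x = torch.rand(3, 227, 227)                   # a ToTensor output in [0, 1]
m = torch.tensor([123.0, 117.0, 104.0])[:, None, None]
normalized = (x - m / 255.0) / (1.0 / 255.0)  # what transforms.Normalize computes
assert torch.allclose(normalized, 255.0 * x - m, atol=1e-4)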
Example #5
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
import os
from bottle import static_file
from jinja2 import Environment, FileSystemLoader

from tasbot.customlog import Log
from tasbot.config import Config

from disqus import Disqus
import ladderdb
from myutils import mkdir_p

cache_opts = {
    'cache.type': 'memory',
    'cache.data_dir': 'tmp/cache/data',
    'cache.lock_dir': 'tmp/cache/lock'
}

config = Config( 'Main.conf' )
Log.init( 'website.log' )
db = ladderdb.LadderDB(config.get('tasbot','alchemy-uri'))
env = Environment(loader=FileSystemLoader('templates'))
staging = config.get_bool('tasbot','staging')
cache = CacheManager(**parse_cache_config_options(cache_opts))
disqus = Disqus(config,cache)
mkdir_p(config.get('ladder','base_dir'))

def local_file(filename, sub, **kwargs):
	path = os.path.join(config.get('ladder','base_dir'), sub)
	return static_file(filename, root=path)
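
(local_file is a thin wrapper over bottle's static_file; a hypothetical route wiring it up follows. The URL and the 'demos' subdirectory are illustrative, chosen to match the replay archive written in Example #1.)

# Hypothetical bottle route using local_file; path and subdir are illustrative.
from bottle import route

@route('/replays/<filename>')
def serve_replay(filename):
	return local_file(filename, 'demos')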
Example #6
    def __init__(self, conf, inference=False):

        logging.info(f'metric learner uses {conf}')
        self.model = torch.nn.DataParallel(BNInception()).cuda()
        logging.info(f'model generated')

        if not inference:

            # one training list per dataset; all share the rand-crop transform
            train_lists = {
                'CUB': './datasets/CUB_200_2011/cub_train.txt',
                'Cars': './datasets/CARS196/cars_train.txt',
                'SOP': './datasets/SOP/sop_train.txt',
                'Inshop': './datasets/Inshop/inshop_train.txt',
            }
            self.dataset = MSBaseDataSet(conf, train_lists[conf.use_dataset],
                                         transform=conf.transform_dict['rand-crop'], mode='RGB')

            self.loader = DataLoader(
                self.dataset, batch_size=conf.batch_size, num_workers=conf.num_workers,
                shuffle=False, sampler=RandomIdSampler(conf, self.dataset.label_index_dict), drop_last=True,
                pin_memory=True,
            )

            self.class_num = self.dataset.num_cls
            self.img_num = self.dataset.num_train

            myutils.mkdir_p(conf.log_path, delete=True)
            self.writer = SummaryWriter(str(conf.log_path))
            self.step = 0
            
            self.head_npair = NpairLoss().to(conf.device)
            self.head_semih_triplet = TripletSemihardLoss().to(conf.device)
            self.head_triplet = TripletLoss(instance=conf.instances).to(conf.device)
            self.head_multisimiloss = MultiSimilarityLoss().to(conf.device)
            logging.info('model heads generated')

            # split backbone params into BN / non-BN groups (head params excluded)
            backbone_bn_para, backbone_wo_bn_para = [
                [p for k, p in self.model.named_parameters()
                 if ('bn' in k) == is_bn and 'head' not in k] for is_bn in [True, False]]

            # same BN / non-BN split for the embedding head's parameters
            head_bn_para, head_wo_bn_para = [
                [p for k, p in self.model.module.head.named_parameters() if
                 ('bn' in k) == is_bn] for is_bn in [True, False]]

            # BN parameters get no weight decay; with freeze_bn the backbone's BN
            # parameters are left out of the optimizer entirely
            self.optimizer = optim.Adam([
                {'params': backbone_bn_para if not conf.freeze_bn else [], 'lr': conf.lr_p},
                {'params': backbone_wo_bn_para, 'weight_decay': conf.weight_decay, 'lr': conf.lr_p},
                {'params': head_bn_para, 'lr': conf.lr},
                {'params': head_wo_bn_para, 'weight_decay': conf.weight_decay, 'lr': conf.lr},
            ])

            logging.info(f'{self.optimizer}, optimizers generated')

            if conf.use_dataset=='CUB' or conf.use_dataset=='Cars':
                self.board_loss_every = 20  
                self.evaluate_every = 100
                self.save_every = 1000
            elif conf.use_dataset=='Inshop':
                self.board_loss_every = 20  
                self.evaluate_every = 200
                self.save_every = 2000
            else:
                self.board_loss_every = 20  
                self.evaluate_every = 500
                self.save_every = 2000
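
(The three *_every intervals above imply a step-driven training loop that logs, evaluates and checkpoints on fixed schedules. A sketch of that pattern follows; train_step() and evaluate() are placeholder names, not taken from the source.)

    # Sketch of how the *_every intervals are typically consumed; train_step()
    # and evaluate() are placeholders, not from the source.
    def train(self, conf):
        while self.step < conf.steps:
            loss = self.train_step()              # placeholder: one forward/backward pass
            if self.step % self.board_loss_every == 0:
                self.writer.add_scalar('loss', loss, self.step)
            if self.step % self.evaluate_every == 0:
                self.evaluate()                   # placeholder: e.g. Recall@K on the test set
            if self.step % self.save_every == 0:
                self.save_state(conf)             # as defined in Example #3
            self.step += 1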