Code example #1
def main():
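    # Dynamically import the config, model, dataset and trainer modules named on the
    # command line, then either run a Comet.ml Optimizer sweep (when 'algorithm' is
    # present in the config) or exhaustively iterate over the generated configurations.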
    global model_def
    global dataset
    global train
    args = argument_parser()
    config_module = importlib.import_module(args.config)
    configs = config_module.config
    model_def = importlib.import_module(args.model).model
    dataset = importlib.import_module(args.dataset).dataset
    train = importlib.import_module(args.trainer).train

    if 'algorithm' in configs:
        comet_config = parse_comet_config(configs)
        opt = Optimizer(comet_config,
                        api_key=configs['API_KEY'],
                        project_name=configs['project_name'])
        for exp in opt.get_experiments():
            experiment = exp
            config = get_parameters(experiment, configs)
            train(
                **{
                    'config': config,
                    'model_def': model_def,
                    'dataset': dataset,
                    'experiment': experiment
                })
    else:
        if args.experiment:
            experiment = Experiment(api_key=configs['API_KEY'],
                                    project_name=configs['project_name'],
                                    workspace=configs['workspace'])
        else:
            experiment = None
        tried_configs = []
        end = False
        while True:
            importlib.reload(config_module)
            configs = config_module.config
            possible_configs = get_configurations(configs)
            for config_idx, config in enumerate(possible_configs):
                if config_idx == len(possible_configs) - 1:
                    end = True
                if config in tried_configs:
                    continue
                else:
                    tried_configs.append(config)
                    train(
                        **{
                            'config': config,
                            'model_def': model_def,
                            'dataset': dataset,
                            'experiment': experiment
                        })
                    break
            if end:
                break
        print("******************End of the training session****************")
Code example #2
	def set_fitargs(self,fitargs,peculiar=[]):
		params = utils.get_parameters(fitargs)
		for key in peculiar:
			for prefix in [''] + utils.MINUIT_PREFIX + utils.OTHER_PREFIX:
				if prefix+key in fitargs:
					if prefix == 'latex_':
						fitargs['{}{}_{}'.format(prefix,key,self.parid)] = '{}^{{\\rm {}}}'.format(fitargs.pop(prefix+key),self.parid)
					else:
						fitargs['{}{}_{}'.format(prefix,key,self.parid)] = fitargs.pop(prefix+key)
		self.fitargs = fitargs
Code example #3
def Adv_training_data_CW(training_data,
                         mean_cat,
                         cov_cat,
                         pi_cat,
                         mean_grass,
                         cov_grass,
                         pi_grass,
                         l=5,
                         target_index=1,
                         stride=8,
                         alpha=0.0001):
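    # Iteratively perturb the training patches with a CW-style targeted attack:
    # take up to 300 gradient steps, clip values to [0, 1], and stop early once
    # the update norm falls below a small threshold.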
    perturbed_data_k = training_data
    W_cat, w_cat, w_0_cat = get_parameters(mean_cat, cov_cat, pi_cat)
    W_grass, w_grass, w_0_grass = get_parameters(mean_grass, cov_grass,
                                                 pi_grass)
    for i in range(300):
        current_grad = gradient_CW(patch_vec_k=perturbed_data_k,
                                   patch_vec_0=training_data,
                                   mean_cat=mean_cat,
                                   cov_cat=cov_cat,
                                   pi_cat=pi_cat,
                                   mean_grass=mean_grass,
                                   cov_grass=cov_grass,
                                   pi_grass=pi_grass,
                                   W_cat=W_cat,
                                   w_cat=w_cat,
                                   w_0_cat=w_0_cat,
                                   W_grass=W_grass,
                                   w_grass=w_grass,
                                   w_0_grass=w_0_grass,
                                   l=l,
                                   target_index=target_index)
        perturbed_data_k_1 = np.clip(perturbed_data_k - alpha * current_grad,
                                     0, 1)
        change = np.linalg.norm((perturbed_data_k_1 - perturbed_data_k))
        perturbed_data_k = perturbed_data_k_1
        if change < 0.001 / (2850):
            break
    return perturbed_data_k_1
Code example #4
	def init(self,path_data=None,path_covariance=None):
		if not hasattr(self,'parid'): self.parid = self.id
		if not self.sorted: self.sorted = pyspectrum.utils.sorted_parameters(utils.get_parameters(self.fitargs))
		self._kwargs,self._amodel = {},[]
		self.set_model()
		if path_data is None:
			if path_covariance is None:
				self.logger.info('Using preloaded data {} and covariance {}.'.format(self.params['path_data'],self.params['path_covariance']))
			else:
				self.set_data_covariance(path_covariance)
				self.logger.info('Using preloaded covariance {}.'.format(self.params['path_covariance']))
		else:
			if path_covariance is not None: self.set_covariance(path_covariance)
			else: self.logger.info('Using preloaded covariance {}.'.format(self.params['path_covariance']))
			self.set_data(path_data)
		#self.check_compatibility()
		if 'spectrum' in self.estimator:
			self.geometry.set_kout(kout={ell:self.xdata[ill] for ill,ell in enumerate(self.ells)})
		else:
			self.geometry.set_sout(sout={ell:self.xdata[ill] for ill,ell in enumerate(self.ells)})
		self.set_lkl()
Code example #5
def model(X_train, Y_train, user, userName, layers_dim, C = 4, lr = 0.1):
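    # One stochastic-gradient step over a single (X, Y) example: forward pass,
    # cost computation, backward pass, parameter update, then pickle the updated
    # per-user parameters (and the output size C) to disk.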
    L = len(layers_dim)
    parameters = get_parameters(layers_dim, userName, user, C)
    # global_cost = {}
    # for id in all_user:
    #     global_cost[str(id)] = []
    # Stochastic gradient descent
    X = X_train.reshape((2,1))
    Y_oh = convert_to_oh(Y_train, C).reshape((C, 1))
    caches, AL = forward_probagation(X, layers_dim, parameters)
    cost = cost_func(Y_oh, AL)
    grads = backward_probagation(Y_oh, caches, layers_dim)
    parameters = update_parameters(parameters, grads, layers_dim, lr)
    pck.dump(parameters, open("C:/Users/pevip/OneDrive/Documents/GitHub/JobSite/JobPosting/JobPosting/AI/RecommenderSystem/users/" + userName + "_profile.p", "wb"))
    pck.dump(C, open("C:/Users/pevip/OneDrive/Documents/GitHub/JobSite/JobPosting/JobPosting/AI/RecommenderSystem/prevLayer.p", "wb"))

    # global_cost[str(userID)].append(cost)

    # for id in all_user:
    #     plt.plot(global_cost[str(id)])
    #     plt.show()

    return parameters
Code example #6
import logging

import numpy as np

from generator import ER_generator, draw_anomalies
from basic_test import basic_features
from com_detection import community_detection
from spectral_localisation import spectral_features
from NetEMD import NetEMD_features
from path_finder import path_features

num_models = 20
num_nodes = 1000
num_basic_mc_samples = 500
num_references = 10
num_null_models = 60

ps = np.linspace(0.001, 0.05, 50)
ws = np.linspace(0.0, 0.01, 11)
candidate_parameters = get_parameters(num_nodes, ps, ws)
num_cand_param = len(candidate_parameters)

for model_id in range(num_models):
    p, w = candidate_parameters[np.random.choice(range(num_cand_param))]
    logging.info("Computing {}-th/{} model (p={:.3f}, w={:.3f})".format(
        model_id, num_models, p, w))
    graph = ER_generator(n=num_nodes, p=p, seed=None)
    graph = draw_anomalies(graph, w=1 - w)
    logging.info("\n\nGenerating null models\n\n")
    _, references = generate_null_models(graph,
                                         num_models=num_references,
                                         min_size=20)
    null_samples_whole, null_samples = generate_null_models(
        graph, num_models=num_null_models, min_size=20)
    logging.info("\n\nGenerating NetEMD features\n\n")
Code example #7
File: train_ddp.py  Project: ravenj73/unbalanced-demo
def main():
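    # Distributed (DDP) training entry point: configure logging, build ImageFolder
    # loaders with a DistributedSampler, initialise the NCCL process group, and train
    # the one-shot ShuffleNetV2 supernet with SGD, label smoothing and a linearly
    # decaying learning rate, finishing with a BN-recalibration pass (bn_process=True).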
    args = get_args()
    args.world_size = args.gpus * args.nodes
    args.rank = args.gpus * args.nr + args.local_rank
    print("RANK: " + str(args.rank) + ", LOCAL RANK: " + str(args.local_rank))

    # Log
    log_format = '[%(asctime)s] %(message)s'
    logging.basicConfig(stream=sys.stdout,
                        level=logging.INFO,
                        format=log_format,
                        datefmt='%d %I:%M:%S')
    t = time.time()
    local_time = time.localtime(t)
    if not os.path.exists('/home/admin/aihub/SinglePathOneShot/log'):
        os.mkdir('/home/admin/aihub/SinglePathOneShot/log')
    fh = logging.FileHandler(
        os.path.join(
            '/home/admin/aihub/SinglePathOneShot/log/train-{}{:02}{}'.format(
                local_time.tm_year % 2000, local_time.tm_mon, t)))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)

    use_gpu = False
    if torch.cuda.is_available():
        use_gpu = True

    assert os.path.exists(args.train_dir)
    train_dataset = datasets.ImageFolder(
        args.train_dir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4),
            transforms.RandomHorizontalFlip(0.5),
            ToBGRTensor(),
        ]))

    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset, num_replicas=args.world_size, rank=args.rank)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=32,
                                               pin_memory=True,
                                               sampler=train_sampler)
    train_dataprovider = DataIterator(train_loader)

    assert os.path.exists(args.val_dir)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        args.val_dir,
        transforms.Compose([
            OpencvResize(256),
            transforms.CenterCrop(224),
            ToBGRTensor(),
        ])),
                                             batch_size=200,
                                             shuffle=False,
                                             num_workers=32,
                                             pin_memory=use_gpu)
    val_dataprovider = DataIterator(val_loader)

    print('load data successfully')

    dist.init_process_group(backend='nccl',
                            init_method='env://',
                            world_size=args.world_size,
                            rank=args.local_rank)
    #     dist.init_process_group(backend='nccl', init_method='tcp://'+args.ip+':'+str(args.port), world_size=args.world_size, rank=args.rank)
    #     dist.init_process_group(backend='nccl', init_method="file:///mnt/nas1/share_file", world_size=args.world_size, rank=args.rank)
    torch.cuda.set_device(args.local_rank)

    channels_scales = (1.0, ) * 20
    model = ShuffleNetV2_OneShot(architecture=list(args.arch),
                                 channels_scales=channels_scales)
    device = torch.device(args.local_rank)
    model = model.cuda(args.local_rank)

    optimizer = torch.optim.SGD(get_parameters(model),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)

    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer,
        lambda step: (1.0 - step / args.total_iters)
        if step <= args.total_iters else 0,
        last_epoch=-1)

    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args.local_rank],
        find_unused_parameters=False)  # output_device=args.local_rank
    loss_function = criterion_smooth.cuda()

    all_iters = 0

    args.optimizer = optimizer
    args.loss_function = loss_function
    args.scheduler = scheduler
    args.train_dataprovider = train_dataprovider
    args.val_dataprovider = val_dataprovider

    if args.eval:
        if args.eval_resume is not None:
            checkpoint = torch.load(args.eval_resume,
                                    map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint, strict=True)
            validate(model, device, args, all_iters=all_iters)
        exit(0)

    validate(model, device, args, all_iters=all_iters)

    while all_iters < args.total_iters:
        all_iters = train(model,
                          device,
                          args,
                          val_interval=args.val_interval,
                          bn_process=False,
                          all_iters=all_iters)
        validate(model, device, args, all_iters=all_iters)
    all_iters = train(model,
                      device,
                      args,
                      val_interval=int(1280000 / args.val_batch_size),
                      bn_process=True,
                      all_iters=all_iters)
    validate(model, device, args, all_iters=all_iters)
Code example #8
File: likelihood.py  Project: adematti/montelss
 def parameters(self):
     return utils.get_parameters(self.fitargs)
Code example #9
def main():
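    # Single-node supernet training on CIFAR-100: standard DataLoaders, SGD with a
    # linearly decaying LR schedule, optional resume from the latest checkpoint, and
    # candidate architectures supplied to train/validate through an ArchLoader.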
    args = get_args()

    # archLoader
    arch_loader = ArchLoader(args.path)

    # Log
    log_format = '[%(asctime)s] %(message)s'
    logging.basicConfig(stream=sys.stdout,
                        level=logging.INFO,
                        format=log_format,
                        datefmt='%d %I:%M:%S')
    t = time.time()
    local_time = time.localtime(t)
    if not os.path.exists('./log'):
        os.mkdir('./log')
    fh = logging.FileHandler(
        os.path.join('log/train-{}{:02}{}'.format(local_time.tm_year % 2000,
                                                  local_time.tm_mon, t)))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)

    use_gpu = False
    if torch.cuda.is_available():
        use_gpu = True

    train_dataset, val_dataset = get_dataset('cifar100')

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=16,
                                               pin_memory=True)
    # train_dataprovider = DataIterator(train_loader)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=200,
                                             shuffle=False,
                                             num_workers=12,
                                             pin_memory=True)

    # val_dataprovider = DataIterator(val_loader)
    print('load data successfully')

    model = mutableResNet20()

    print('load model successfully')

    optimizer = torch.optim.SGD(get_parameters(model),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)

    if use_gpu:
        model = nn.DataParallel(model)
        loss_function = criterion_smooth.cuda()
        device = torch.device("cuda")
    else:
        loss_function = criterion_smooth
        device = torch.device("cpu")

    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer,
        lambda step: (1.0 - step / args.total_iters)
        if step <= args.total_iters else 0,
        last_epoch=-1)

    model = model.to(device)

    # dp_model = torch.nn.parallel.DistributedDataParallel(model)

    all_iters = 0
    if args.auto_continue:  # auto-resume from the latest checkpoint?
        lastest_model, iters = get_lastest_model()
        if lastest_model is not None:
            all_iters = iters
            checkpoint = torch.load(lastest_model,
                                    map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint['state_dict'], strict=True)
            print('load from checkpoint')
            for i in range(iters):
                scheduler.step()

    # Parameter setup
    args.optimizer = optimizer
    args.loss_function = loss_function
    args.scheduler = scheduler
    args.train_loader = train_loader
    args.val_loader = val_loader
    # args.train_dataprovider = train_dataprovider
    # args.val_dataprovider = val_dataprovider

    if args.eval:
        if args.eval_resume is not None:
            checkpoint = torch.load(args.eval_resume,
                                    map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint, strict=True)
            validate(model,
                     device,
                     args,
                     all_iters=all_iters,
                     arch_loader=arch_loader)
        exit(0)

    while all_iters < args.total_iters:
        all_iters = train(model,
                          device,
                          args,
                          val_interval=args.val_interval,
                          bn_process=False,
                          all_iters=all_iters,
                          arch_loader=arch_loader,
                          arch_batch=args.arch_batch)
Code example #10
            lats.append(chan.latitude)
            lons.append(chan.longitude)

sc = ax.scatter(lons,
                lats,
                200.,
                color='C1',
                edgecolor='k',
                marker="v",
                transform=ccrs.PlateCarree(),
                label='IW Stations')

mlon = np.mean(lons)
mlat = np.mean(lats)

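# Query the event catalogue around the mean station location, using the magnitude
# and distance limits returned by utils.get_parameters('P').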
paramdic = utils.get_parameters('P')
cat = client.get_events(starttime=stime,
                        endtime=etime,
                        minmagnitude=paramdic['min_mag'],
                        maxmagnitude=paramdic['max_mag'],
                        latitude=mlat,
                        longitude=mlon,
                        maxradius=paramdic['max_radius'],
                        minradius=paramdic['min_radius'])
evelats = []
evelons = []
for eve in cat:
    evelats.append(eve.origins[0].latitude)
    evelons.append(eve.origins[0].longitude)

print(evelons)
Code example #11
File: default.py  Project: Rick7C2/xbmc-pneumatic
if __name__ == "__main__":
    log('v%s started' % __settings__.getAddonInfo("version"), xbmc.LOGNOTICE)
    HANDLE = int(sys.argv[1])
    if not (__settings__.getSetting("firstrun")):
        __settings__.openSettings()
        if utils.pass_setup_test(SABNZBD.setup_streaming(), __settings__.getSetting("sabnzbd_incomplete")):
            __settings__.setSetting("firstrun", '1')
    else:
        if (not sys.argv[2]):
            add_posts({'title':'Incomplete'}, '', MODE_INCOMPLETE)
            add_posts({'title':'Browse local NZB\'s'}, '', MODE_LOCAL, '', '')
            xbmcplugin.setContent(HANDLE, 'movies')
            xbmcplugin.endOfDirectory(HANDLE, succeeded=True, cacheToDisc=True)
        else:
            params = utils.get_parameters(sys.argv[2])
            get = params.get
            if get("mode") == MODE_PLAY:
                is_home, sab_nzo_id = is_nzb_home(params)
                if is_home:
                    nzbname = utils.unquote_plus(get("nzbname"))
                    pre_play(nzbname, nzo=sab_nzo_id)
            if get("mode") == MODE_LIST_PLAY or get("mode") == MODE_AUTO_PLAY:
                play_video(params)
            if get("mode") == MODE_DELETE:
                delete(params)
            if get("mode") == MODE_DOWNLOAD:
                download(params)
            if get("mode") == MODE_REPAIR:
                repair(params)
            if get("mode") == MODE_INCOMPLETE:
Code example #12
def main():
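    # Supernet training with a choice of dataset: ImageNet-style ImageFolder pipelines
    # when --cifar10 is not set, otherwise torchvision CIFAR-10, followed by SGD with a
    # linearly decaying LR schedule and optional resume from the latest checkpoint.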
    args = get_args()

    # Log
    log_format = '[%(asctime)s] %(message)s'
    logging.basicConfig(stream=sys.stdout,
                        level=logging.INFO,
                        format=log_format,
                        datefmt='%d %I:%M:%S')
    t = time.time()
    local_time = time.localtime(t)
    if not os.path.exists('./log'):
        os.mkdir('./log')
    fh = logging.FileHandler(
        os.path.join('log/train-{}{:02}{}'.format(local_time.tm_year % 2000,
                                                  local_time.tm_mon, t)))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)

    use_gpu = False
    if torch.cuda.is_available():
        use_gpu = True

    if not args.cifar10:

        assert os.path.exists(args.train_dir)
        train_dataset = datasets.ImageFolder(
            args.train_dir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.ColorJitter(brightness=0.4,
                                       contrast=0.4,
                                       saturation=0.4),
                transforms.RandomHorizontalFlip(0.5),
                ToBGRTensor(),
            ]))
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=1,
                                                   pin_memory=use_gpu)
        train_dataprovider = DataIterator(train_loader)

        assert os.path.exists(args.val_dir)
        val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
            args.val_dir,
            transforms.Compose([
                OpencvResize(256),
                transforms.CenterCrop(224),
                ToBGRTensor(),
            ])),
                                                 batch_size=200,
                                                 shuffle=False,
                                                 num_workers=1,
                                                 pin_memory=use_gpu)
        val_dataprovider = DataIterator(val_loader)
        print('load imagenet data successfully')

    else:
        train_transform, valid_transform = data_transforms(args)

        trainset = torchvision.datasets.CIFAR10(root=os.path.join(
            args.data_dir, 'cifar'),
                                                train=True,
                                                download=True,
                                                transform=train_transform)
        train_loader = torch.utils.data.DataLoader(trainset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   pin_memory=True,
                                                   num_workers=8)
        train_dataprovider = DataIterator(train_loader)
        valset = torchvision.datasets.CIFAR10(root=os.path.join(
            args.data_dir, 'cifar'),
                                              train=False,
                                              download=True,
                                              transform=valid_transform)
        val_loader = torch.utils.data.DataLoader(valset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 pin_memory=True,
                                                 num_workers=8)
        val_dataprovider = DataIterator(val_loader)

        print('load cifar10 data successfully')

    model = ShuffleNetV2_OneShot()

    optimizer = torch.optim.SGD(get_parameters(model),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)

    if use_gpu:
        model = nn.DataParallel(model)
        loss_function = criterion_smooth.cuda()
        device = torch.device("cuda")
    else:
        loss_function = criterion_smooth
        device = torch.device("cpu")

    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer,
        lambda step: (1.0 - step / args.total_iters)
        if step <= args.total_iters else 0,
        last_epoch=-1)

    model = model.to(device)

    all_iters = 0
    if args.auto_continue:
        lastest_model, iters = get_lastest_model()
        if lastest_model is not None:
            all_iters = iters
            checkpoint = torch.load(lastest_model,
                                    map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint['state_dict'], strict=True)
            print('load from checkpoint')
            for i in range(iters):
                scheduler.step()

    args.optimizer = optimizer
    args.loss_function = loss_function
    args.scheduler = scheduler
    args.train_dataprovider = train_dataprovider
    args.val_dataprovider = val_dataprovider

    if args.eval:
        if args.eval_resume is not None:
            checkpoint = torch.load(args.eval_resume,
                                    map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint, strict=True)
            validate(model, device, args, all_iters=all_iters)
        exit(0)

    while all_iters < args.total_iters:
        all_iters = train(model,
                          device,
                          args,
                          val_interval=args.val_interval,
                          bn_process=False,
                          all_iters=all_iters)
Code example #13
def CW_attack_fast(img_0,
                   mean_cat_attack,
                   cov_cat_attack,
                   pi_cat_attack,
                   mean_grass_attack,
                   cov_grass_attack,
                   pi_grass_attack,
                   mean_cat_defense,
                   cov_cat_defense,
                   pi_cat_defense,
                   mean_grass_defense,
                   cov_grass_defense,
                   pi_grass_defense,
                   original_img,
                   truth,
                   l=5,
                   target_index=1,
                   stride=8,
                   alpha=0.0001,
                   display_iter=300,
                   title='',
                   path='./Outputs',
                   preprocessing=[None, None],
                   attack_type='blackbox'):
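    # Faster CW attack that perturbs all image patches at once: the image is unfolded
    # into patch vectors with parallel(), the CW gradient is computed for every patch,
    # folded back with unparallel_grad(), and the loop stops when the per-iteration
    # change becomes small (thresholds depend on the stride).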
    iter_num = 0
    parallel_img_0 = parallel(img_0, stride=stride)
    img_k = img_0
    W_cat, w_cat, w_0_cat = get_parameters(mean_cat_attack, cov_cat_attack,
                                           pi_cat_attack)
    W_grass, w_grass, w_0_grass = get_parameters(mean_grass_attack,
                                                 cov_grass_attack,
                                                 pi_grass_attack)

    while iter_num < 300:
        iter_num += 1
        parallel_img_k = parallel(img_k, stride=stride)
        if attack_type == 'whitebox' and preprocessing[0] is not None:
            parallel_img_k = preprocessing[0].forward(parallel_img_k)
            parallel_img_0 = preprocessing[0].forward(parallel_img_0)

        current_grad = gradient_CW(patch_vec_k=parallel_img_k,
                                   patch_vec_0=parallel_img_0,
                                   mean_cat=mean_cat_attack,
                                   cov_cat=cov_cat_attack,
                                   pi_cat=pi_cat_attack,
                                   mean_grass=mean_grass_attack,
                                   cov_grass=cov_grass_attack,
                                   pi_grass=pi_grass_attack,
                                   W_cat=W_cat,
                                   w_cat=w_cat,
                                   w_0_cat=w_0_cat,
                                   W_grass=W_grass,
                                   w_grass=w_grass,
                                   w_0_grass=w_0_grass,
                                   l=l,
                                   target_index=target_index)
        grad = unparallel_grad(current_grad, img_0, stride=stride)
        img_k_1 = np.clip(img_k - alpha * grad, 0, 1)
        change = np.linalg.norm((img_k_1 - img_k))
        img_k = img_k_1

        if (iter_num) % display_iter == 0:
            print("\n")
            display_image(img_perturbed=img_k_1,
                          mean_cat=mean_cat_defense,
                          cov_cat=cov_cat_defense,
                          pi_cat=pi_cat_defense,
                          mean_grass=mean_grass_defense,
                          cov_grass=cov_grass_defense,
                          pi_grass=pi_grass_defense,
                          original_img=original_img,
                          truth=truth,
                          title=title + 'iter_' + str(iter_num),
                          stride=stride,
                          preprocessing=preprocessing[1],
                          path=path)

            print(' Change:{}'.format(change))
        if change < 0.001 and stride == 8:
            print("\n\nMax Iteration:" + str(iter_num))
            break
        elif change < 0.01 and stride == 1:
            print("\n\nMax Iteration:" + str(iter_num))
            break

    return img_k_1
Code example #14
        tree = spatial.cKDTree(matrix)

    return tree.query(vector)


df = utils.create_finger_print_data_frame(cs.FINGERPRINT_FILE,
                                          cs.PARAMETER_FILE,
                                          cs.PARAMETER_KEY,
                                          cs.CACHE_FILE,
                                          force=True)

import sklearn
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import train_test_split

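# Fit a k-nearest-neighbours regressor that predicts the target parameters from the
# fingerprint features; the complex-valued features are split into real and imaginary parts.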
parameters = utils.get_parameters()

y = df.iloc[:, -len(parameters):]
x = df.iloc[:, :-len(parameters)]

real = x.values.real
imag = x.values.imag
x = numpy.concatenate((real, imag), axis=1)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

knn = KNeighborsRegressor()

knn.fit(x_train, y_train)

predicted = knn.predict(x_test)
Code example #15
def main():
    args = get_args()

    # Log
    log_format = '[%(asctime)s] %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO,
        format=log_format, datefmt='%d %I:%M:%S')
    t = time.time()
    local_time = time.localtime(t)
    if not os.path.exists('./log'):
        os.mkdir('./log')
    fh = logging.FileHandler(os.path.join('log/train-{}{:02}{}'.format(local_time.tm_year % 2000, local_time.tm_mon, t)))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)

    use_gpu = False
    if torch.cuda.is_available():
        use_gpu = True

    assert os.path.exists(args.train_dir)
    train_dataset = datasets.ImageFolder(
        args.train_dir,
        transforms.Compose([
            transforms.RandomResizedCrop(args.im_size),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
            transforms.RandomHorizontalFlip(0.5),
            ToBGRTensor(),
        ])
    )
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=8, pin_memory=use_gpu)
    train_dataprovider = DataIterator(train_loader)

    assert os.path.exists(args.val_dir)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(args.val_dir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(args.im_size),
            ToBGRTensor(),
        ])),
        batch_size=200, shuffle=False,
        num_workers=8, pin_memory=use_gpu
    )
    val_dataprovider = DataIterator(val_loader)
    print('load data successfully')

    arch_path = 'arch.pkl'

    if os.path.exists(arch_path):
        with open(arch_path,'rb') as f:
            architecture=pickle.load(f)
    else:
        raise NotImplementedError
    channels_scales = (1.0,)*20
    model = ShuffleNetV2_OneShot(architecture=architecture, channels_scales=channels_scales, n_class=args.num_classes, input_size=args.im_size)

    print('flops:',get_flops(model))

    optimizer = torch.optim.SGD(get_parameters(model),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    criterion_smooth = CrossEntropyLabelSmooth(args.num_classes, 0.1)

    if use_gpu:
        # model = nn.DataParallel(model)
        loss_function = criterion_smooth.cuda()
        device = torch.device("cuda")
    else:
        loss_function = criterion_smooth
        device = torch.device("cpu")

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                    lambda step : (1.0-step/args.total_iters) if step <= args.total_iters else 0, last_epoch=-1)

    # model = model.to(device)
    model = model.cuda()

    all_iters = 0
    if args.auto_continue:
        lastest_model, iters = get_lastest_model()
        if lastest_model is not None:
            all_iters = iters
            checkpoint = torch.load(lastest_model, map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint['state_dict'], strict=True)
            print('load from checkpoint')
            for i in range(iters):
                scheduler.step()

    args.optimizer = optimizer
    args.loss_function = loss_function
    args.scheduler = scheduler
    args.train_dataprovider = train_dataprovider
    args.val_dataprovider = val_dataprovider

    if args.eval:
        if args.eval_resume is not None:
            checkpoint = torch.load(args.eval_resume, map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint, strict=True)
            validate(model, device, args, all_iters=all_iters)
        exit(0)
    t = time.time()
    while all_iters < args.total_iters:
        all_iters = train(model, device, args, val_interval=args.val_interval, bn_process=False, all_iters=all_iters)
        validate(model, device, args, all_iters=all_iters)
    # all_iters = train(model, device, args, val_interval=int(1280000/args.batch_size), bn_process=True, all_iters=all_iters)
    validate(model, device, args, all_iters=all_iters)
    save_checkpoint({'state_dict': model.state_dict(),}, args.total_iters, tag='bnps-')
    print("Finished {} iters in {:.3f} seconds".format(all_iters, time.time()-t))
Code example #16
def main():
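    # Multi-GPU DDP training split into two phases: an optional warm-up of the supernet
    # weights, then subnet training driven by architectures from the ArchLoader, with
    # periodic validation on rank 0.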
    args = get_args()
    num_gpus = torch.cuda.device_count()
    args.gpu = args.local_rank % num_gpus
    torch.cuda.set_device(args.gpu)

    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    args.world_size = torch.distributed.get_world_size()
    args.batch_size = args.batch_size // args.world_size

    # archLoader
    arch_loader = ArchLoader(args.path)

    # Log
    log_format = '[%(asctime)s] %(message)s'
    logging.basicConfig(stream=sys.stdout,
                        level=logging.INFO,
                        format=log_format,
                        datefmt='%m-%d %I:%M:%S')
    t = time.time()
    local_time = time.localtime(t)
    if not os.path.exists('./log'):
        os.mkdir('./log')
    fh = logging.FileHandler(
        os.path.join('log/train-{}-{:02}-{:02}-{:.3f}'.format(
            local_time.tm_year % 2000, local_time.tm_mon, local_time.tm_mday,
            t)))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)

    use_gpu = False
    if torch.cuda.is_available():
        use_gpu = True

    train_loader = get_train_loader(args.batch_size, args.local_rank,
                                    args.num_workers, args.total_iters)

    val_loader = get_val_loader(args.batch_size, args.num_workers)

    model = mutableResNet20()

    logging.info('load model successfully')

    optimizer = torch.optim.SGD(get_parameters(model),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)

    if use_gpu:
        # model = nn.DataParallel(model)
        model = model.cuda(args.gpu)
        model = nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True)
        loss_function = criterion_smooth.cuda()
    else:
        loss_function = criterion_smooth

    scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=5)

    all_iters = 0

    if args.auto_continue:  # auto-resume from the latest checkpoint?
        lastest_model, iters = get_lastest_model()
        if lastest_model is not None:
            all_iters = iters
            checkpoint = torch.load(lastest_model,
                                    map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint['state_dict'], strict=True)
            logging.info('load from checkpoint')
            for i in range(iters):
                scheduler.step()

    # Parameter setup
    args.optimizer = optimizer
    args.loss_function = loss_function
    args.scheduler = scheduler
    args.train_loader = train_loader
    args.val_loader = val_loader

    if args.eval:
        if args.eval_resume is not None:
            checkpoint = torch.load(args.eval_resume,
                                    map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint, strict=True)
            validate(model, args, all_iters=all_iters, arch_loader=arch_loader)
        exit(0)

    # warmup weights
    if args.warmup > 0:
        logging.info("begin warmup weights")
        while all_iters < args.warmup:
            all_iters = train_supernet(model,
                                       args,
                                       bn_process=False,
                                       all_iters=all_iters)

        validate(model, args, all_iters=all_iters, arch_loader=arch_loader)

    while all_iters < args.total_iters:
        logging.info("=" * 50)
        all_iters = train_subnet(model,
                                 args,
                                 bn_process=False,
                                 all_iters=all_iters,
                                 arch_loader=arch_loader)

        if all_iters % 200 == 0 and args.local_rank == 0:
            logging.info("validate iter {}".format(all_iters))

            validate(model, args, all_iters=all_iters, arch_loader=arch_loader)
Code example #17
def main():
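    # Train a slice of the candidate architectures from scratch (10 architectures per
    # split) on 96x96 inputs and record each architecture's top-1/top-5 accuracy in a
    # pickle file for the current split.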
    args = get_args()

    # Log
    log_format = '[%(asctime)s] %(message)s'
    logging.basicConfig(stream=sys.stdout,
                        level=logging.INFO,
                        format=log_format,
                        datefmt='%d %I:%M:%S')
    t = time.time()
    local_time = time.localtime(t)
    if not os.path.exists('./log'):
        os.mkdir('./log')
    fh = logging.FileHandler(
        os.path.join('log/train-{}{:02}{}'.format(local_time.tm_year % 2000,
                                                  local_time.tm_mon, t)))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)

    use_gpu = False
    if torch.cuda.is_available():
        use_gpu = True

    assert os.path.exists(args.train_dir)
    train_dataset = datasets.ImageFolder(
        args.train_dir,
        transforms.Compose([
            transforms.RandomResizedCrop(96),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4),
            transforms.RandomHorizontalFlip(0.5),
            ToBGRTensor(),
        ]))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=4,
                                               pin_memory=use_gpu)
    train_dataprovider = DataIterator(train_loader)

    assert os.path.exists(args.val_dir)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            args.val_dir,
            transforms.Compose([
                OpencvResize(96),
                # transforms.CenterCrop(96),
                ToBGRTensor(),
            ])),
        batch_size=200,
        shuffle=False,
        num_workers=4,
        pin_memory=use_gpu)
    val_dataprovider = DataIterator(val_loader)

    arch_path = 'cl400.p'

    if os.path.exists(arch_path):
        with open(arch_path, 'rb') as f:
            architectures = pickle.load(f)
    else:
        raise NotImplementedError
    channels_scales = (1.0, ) * 20
    cands = {}
    splits = [(i, 10 + i) for i in range(0, 400, 10)]
    architectures = np.array(architectures)
    architectures = architectures[
        splits[args.split_num][0]:splits[args.split_num][1]]
    print(len(architectures))
    logging.info("Training and Validating arch: " +
                 str(splits[args.split_num]))
    for architecture in architectures:
        architecture = tuple(architecture.tolist())
        model = ShuffleNetV2_OneShot(architecture=architecture,
                                     channels_scales=channels_scales,
                                     n_class=10,
                                     input_size=96)

        print('flops:', get_flops(model))

        optimizer = torch.optim.SGD(get_parameters(model),
                                    lr=args.learning_rate,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
        criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)

        if use_gpu:
            model = nn.DataParallel(model)
            loss_function = criterion_smooth.cuda()
            device = torch.device("cuda")
        else:
            loss_function = criterion_smooth
            device = torch.device("cpu")

        scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer,
            lambda step: (1.0 - step / args.total_iters)
            if step <= args.total_iters else 0,
            last_epoch=-1)

        model = model.to(device)

        all_iters = 0
        if args.auto_continue:
            lastest_model, iters = get_lastest_model()
            if lastest_model is not None:
                all_iters = iters
                checkpoint = torch.load(
                    lastest_model, map_location=None if use_gpu else 'cpu')
                model.load_state_dict(checkpoint['state_dict'], strict=True)
                print('load from checkpoint')
                for i in range(iters):
                    scheduler.step()

        args.optimizer = optimizer
        args.loss_function = loss_function
        args.scheduler = scheduler
        args.train_dataprovider = train_dataprovider
        args.val_dataprovider = val_dataprovider
        # print("BEGIN VALDATE: ", args.eval, args.eval_resume)
        if args.eval:
            if args.eval_resume is not None:
                checkpoint = torch.load(
                    args.eval_resume, map_location=None if use_gpu else 'cpu')
                model.load_state_dict(checkpoint, strict=True)
                validate(model, device, args, all_iters=all_iters)
            exit(0)
        # t1,t5 = validate(model, device, args, all_iters=all_iters)
        # print("VALDATE: ", t1, "   ", t5)

        while all_iters < args.total_iters:
            all_iters = train(model,
                              device,
                              args,
                              val_interval=args.val_interval,
                              bn_process=False,
                              all_iters=all_iters)
            validate(model, device, args, all_iters=all_iters)
        all_iters = train(model,
                          device,
                          args,
                          val_interval=int(1280000 / args.batch_size),
                          bn_process=True,
                          all_iters=all_iters)
        top1, top5 = validate(model, device, args, all_iters=all_iters)
        save_checkpoint({
            'state_dict': model.state_dict(),
        },
                        args.total_iters,
                        tag='bnps-')
        cands[architecture] = [top1, top5]
        pickle.dump(
            cands,
            open("from_scratch_split_{}.pkl".format(args.split_num), 'wb'))
Code example #18
File: predict.py  Project: jakelong0509/JobSite
import sys
import numpy as np
import csv
import matplotlib.pyplot as plt
import pickle as pck
from utils import get_parameters, sigmoid, sigmoid_backward, softmax, softmax_backward, convert_to_oh, convert_dictionary_to_vector, convert_vector_to_dictionary, initialize_parameters, add_parameters, subtract_parameters
from Train import model, predict

if __name__ == "__main__":
    n_y = pck.load(
        open(
            r"C:\Users\pevip\OneDrive\Documents\GitHub\JobSite\JobPosting\JobPosting\AI\RecommenderSystem\prevlayer.p",
            "rb"))
    n_y = int(n_y)
    layers_dim = [2, 6, 10, n_y]
    user = int(sys.argv[1])
    prev1 = int(sys.argv[2])
    prev2 = int(sys.argv[3])
    userName = sys.argv[4]
    X = np.array([prev1, prev2], dtype=int).reshape((2, 1))
    parameters = get_parameters(layers_dim, userName, user, C=n_y)
    Y = predict(X, user, parameters, layers_dim)
    print(Y)
Code example #19
def main():
    parser = argparse.ArgumentParser(
        "Variational autoencoders for collaborative filtering")
    parser.add_argument('cmd', type=str, choices=['train'], help='train')
    parser.add_argument('--arch_type',
                        type=str,
                        default='MultiVAE',
                        help='architecture',
                        choices=['MultiVAE', 'MultiDAE'])
    parser.add_argument('--dataset_name',
                        type=str,
                        default='ml-20m',
                        help='dataset name',
                        choices=['ml-20m', 'lastfm-360k'])
    parser.add_argument('--processed_dir',
                        type=str,
                        default='O:/dataset/vae_cf/data/ml-20m/pro_sg',
                        help='dataset directory')
    parser.add_argument('--n_items', type=int, default=1, help='n items')
    parser.add_argument(
        '--conditioned_on',
        type=str,
        default=None,
        help=
        'conditioned on user profile (g: gender, a: age, c: country) for Last.fm'
    )
    parser.add_argument('--checkpoint_dir',
                        type=str,
                        default='./checkpoint/',
                        help='checkpoints directory')
    parser.add_argument('--checkpoint_freq',
                        type=int,
                        default=1,
                        help='checkpoint save frequency')
    parser.add_argument('--valid_freq',
                        type=int,
                        default=1,
                        help='validation frequency in training')
    parser.add_argument(
        '-c',
        '--config',
        type=int,
        default=1,
        choices=configurations.keys(),
        help='the number of settings and hyperparameters used in training')
    parser.add_argument('--start_step',
                        dest='start_step',
                        type=int,
                        default=0,
                        help='start step')
    parser.add_argument('--total_steps',
                        dest='total_steps',
                        type=int,
                        default=int(3e5),
                        help='Total number of steps')
    parser.add_argument('--batch_size', type=int, default=1, help='batch size')
    parser.add_argument('--train_batch_size',
                        type=int,
                        default=1,
                        help='batch size')
    parser.add_argument('--valid_batch_size',
                        type=int,
                        default=1,
                        help='batch size in validation')
    parser.add_argument('--test_batch_size',
                        type=int,
                        default=1,
                        help='batch size in test')
    parser.add_argument('--print_freq',
                        type=int,
                        default=1,
                        help='log print frequency')
    parser.add_argument('--upper_train',
                        type=int,
                        default=-1,
                        help='max of train images(for debug)')
    parser.add_argument('--upper_valid',
                        type=int,
                        default=-1,
                        help='max of valid images(for debug)')
    parser.add_argument('--upper_test',
                        type=int,
                        default=-1,
                        help='max of test images(for debug)')
    parser.add_argument(
        '--total_anneal_steps',
        type=int,
        default=0,
        help='the total number of gradient updates for annealing')
    parser.add_argument('--anneal_cap',
                        type=float,
                        default=0.2,
                        help='largest annealing parameter')
    parser.add_argument('--dropout_p',
                        dest='dropout_p',
                        type=float,
                        default=0.5,
                        help='dropout rate')
    parser.add_argument('--gpu', type=int, default=0, help='GPU id')
    parser.add_argument('-j',
                        '--workers',
                        default=4,
                        type=int,
                        metavar='N',
                        help='number of data loading workers (default: 4)')
    args = parser.parse_args()

    if args.cmd == 'train':
        os.makedirs(args.checkpoint_dir, exist_ok=True)
        cfg = configurations[args.config]

    print(args)

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()
    if cuda:
        print("torch.backends.cudnn.version: {}".format(
            torch.backends.cudnn.version()))

    torch.manual_seed(98765)
    if cuda:
        torch.cuda.manual_seed(98765)

    # # 1. data loader
    kwargs = {'num_workers': args.workers, 'pin_memory': True} if cuda else {}
    root = args.processed_dir

    DS = dataset.MovieLensDataset if args.dataset_name == 'ml-20m' else dataset.LastfmDataset
    if args.cmd == 'train':
        dt = DS(root,
                'data_csr.pkl',
                split='train',
                upper=args.upper_train,
                conditioned_on=args.conditioned_on)
        train_loader = torch.utils.data.DataLoader(
            dt, batch_size=args.train_batch_size, shuffle=True, **kwargs)

        dt = DS(root,
                'data_csr.pkl',
                split='valid',
                upper=args.upper_valid,
                conditioned_on=args.conditioned_on)
        valid_loader = torch.utils.data.DataLoader(
            dt, batch_size=args.valid_batch_size, shuffle=False, **kwargs)

        dt = DS(root,
                'data_csr.pkl',
                split='test',
                upper=args.upper_test,
                conditioned_on=args.conditioned_on)
        test_loader = torch.utils.data.DataLoader(
            dt, batch_size=args.test_batch_size, shuffle=False, **kwargs)

    # 2. model
    n_conditioned = 0
    if args.conditioned_on:  # used for conditional VAE
        if 'g' in args.conditioned_on:
            n_conditioned += 3
        if 'a' in args.conditioned_on:
            n_conditioned += 10
        if 'c' in args.conditioned_on:
            n_conditioned += 17

    if 'MultiVAE' in args.arch_type:
        model = MultiVAE(dropout_p=args.dropout_p,
                         weight_decay=0.0,
                         cuda2=cuda,
                         q_dims=[args.n_items, 600, 200],
                         p_dims=[200, 600, args.n_items],
                         n_conditioned=n_conditioned)
    if 'MultiDAE' in args.arch_type:
        model = MultiDAE(dropout_p=args.dropout_p,
                         weight_decay=0.01 / args.train_batch_size,
                         cuda2=cuda)
    print(model)

    start_epoch = 0
    start_step = 0

    if cuda:
        model = model.cuda()

    # 3. optimizer
    if args.cmd == 'train':
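        # Adam with two parameter groups (non-bias weights and biases) so that weight
        # decay could be tuned per group; here both groups use weight_decay=0.0.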
        optim = torch.optim.Adam(
            [
                {
                    'params': list(utils.get_parameters(model, bias=False)),
                    'weight_decay': 0.0
                },
                {
                    'params': list(utils.get_parameters(model, bias=True)),
                    'weight_decay': 0.0
                },
            ],
            lr=cfg['lr'],
        )

        # lr_policy: step
        last_epoch = -1
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optim,
            milestones=[100, 150],
            gamma=cfg['gamma'],
            last_epoch=last_epoch)

    if args.cmd == 'train':
        trainer = Trainer(
            cmd=args.cmd,
            cuda=cuda,
            model=model,
            optim=optim,
            lr_scheduler=lr_scheduler,
            train_loader=train_loader,
            valid_loader=valid_loader,
            test_loader=test_loader,
            start_step=start_step,
            total_steps=args.total_steps,
            interval_validate=args.valid_freq,
            checkpoint_dir=args.checkpoint_dir,
            print_freq=args.print_freq,
            checkpoint_freq=args.checkpoint_freq,
            total_anneal_steps=args.total_anneal_steps,
            anneal_cap=args.anneal_cap,
        )
        trainer.train()
Code example #20
def CW_attack(img_0,
              mean_cat,
              cov_cat,
              pi_cat,
              mean_grass,
              cov_grass,
              pi_grass,
              original_img,
              truth,
              l=5,
              target_index=1,
              stride=8,
              alpha=0.0001,
              display_iter=300,
              title='',
              preprocessing=None):
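    # Reference (slow) CW attack: slide an 8x8 window over the image with the given
    # stride, accumulate the CW gradient patch by patch, take a clipped gradient step,
    # and stop once the change between iterations is small.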
    iter_num = 0
    img_perturbed_k = np.copy(img_0)
    img_perturbed_k_1 = np.copy(img_0)
    W_cat, w_cat, w_0_cat = get_parameters(mean_cat, cov_cat, pi_cat)
    W_grass, w_grass, w_0_grass = get_parameters(mean_grass, cov_grass,
                                                 pi_grass)
    while iter_num < 300:
        iter_num += 1
        grad = np.zeros_like(img_0)
        for i in range(4, img_0.shape[0] - 4,
                       stride):  #loop starting form zero to center the output
            for j in range(
                    4, img_0.shape[1] - 4,
                    stride):  #loop starting form zero to center the output
                patch_vec_0 = img_0[i - 4:i + 4, j - 4:j + 4].reshape((64, 1))
                patch_vec_k = img_perturbed_k[i - 4:i + 4,
                                              j - 4:j + 4].reshape((64, 1))
                grad[i - 4:i + 4, j - 4:j + 4] += gradient_CW(
                    patch_vec_k=patch_vec_k,
                    patch_vec_0=patch_vec_0,
                    mean_cat=mean_cat,
                    cov_cat=cov_cat,
                    pi_cat=pi_cat,
                    mean_grass=mean_grass,
                    cov_grass=cov_grass,
                    pi_grass=pi_grass,
                    W_cat=W_cat,
                    w_cat=w_cat,
                    w_0_cat=w_0_cat,
                    W_grass=W_grass,
                    w_grass=w_grass,
                    w_0_grass=w_0_grass,
                    l=l,
                    target_index=target_index).reshape((8, 8))

        img_perturbed_k_1 = np.clip(img_perturbed_k - alpha * grad, 0, 1)
        change = np.linalg.norm((img_perturbed_k_1 - img_perturbed_k))
        img_perturbed_k = img_perturbed_k_1
        if (iter_num) % display_iter == 0:
            print("\n")
            display_image(img_perturbed=img_perturbed_k,
                          mean_cat=mean_cat,
                          cov_cat=cov_cat,
                          pi_cat=pi_cat,
                          mean_grass=mean_grass,
                          cov_grass=cov_grass,
                          pi_grass=pi_grass,
                          original_img=original_img,
                          truth=truth,
                          title=title + 'iter_' + str(iter_num),
                          stride=stride,
                          preprocessing=preprocessing)
            print(' Change:{}'.format(change))
        if change < 0.001 and stride == 8:
            break
        elif change < 0.01 and stride == 1:
            break
    return img_perturbed_k
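
A minimal usage sketch for CW_attack, assuming the class-conditional Gaussian parameters (means, covariances, priors) have already been estimated from 8x8 training patches; the file names below are placeholders:

import numpy as np
from matplotlib import pyplot as plt

# hypothetical inputs: an image scaled to [0, 1] and its pixel-wise label mask
img = plt.imread('cat_grass.jpg').astype(np.float64) / 255.0
if img.ndim == 3:
    img = img.mean(axis=2)  # collapse RGB to a single channel if needed
truth = plt.imread('truth.png')

# run the targeted attack with the default hyper-parameters
img_adv = CW_attack(img_0=np.copy(img),
                    mean_cat=mean_cat, cov_cat=cov_cat, pi_cat=pi_cat,
                    mean_grass=mean_grass, cov_grass=cov_grass, pi_grass=pi_grass,
                    original_img=img, truth=truth,
                    l=5, target_index=1, stride=8, alpha=0.0001,
                    display_iter=100, title='cw_')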
コード例 #21
0
    def main(self):
        params = utils.get_parameters(sys.argv[2])
        method = self.patterns.get(params.get('mode'))
        if not method:
            return self.menu()
        return method(**params)
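
For context, a rough sketch of what a query-string based get_parameters helper and the patterns dispatch table could look like here; the parsing format and mode names are assumptions, not taken from the original project:

from urllib.parse import parse_qsl

def get_parameters(query_string):
    # e.g. '?mode=list&url=http%3A%2F%2Fexample.com' -> {'mode': 'list', 'url': 'http://example.com'}
    return dict(parse_qsl(query_string.lstrip('?')))

# a dispatch table such as self.patterns could then map mode names to handler methods:
# self.patterns = {'list': self.list_items, 'play': self.play_item}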
コード例 #22
0
             mu=mu,
             theta=theta,
             sigma=sigma,
             gamma=gamma,
             tau=tau,
             train_type=TRAIN_TYPE,
             actor_lr=actor_lr,
             critic_lr=critic_lr,
             batch_size=batch_size,
             hmm_state=hmm_state)

# define ROS service client and messages
rospy.wait_for_service('/gazebo_env_io/pytorch_io_service')
pytorch_io_service = rospy.ServiceProxy('/gazebo_env_io/pytorch_io_service',
                                        SimpleCtrl)
TERMINAL_REWARD, COLLISION_REWARD, SURVIVE_REWARD = utils.get_parameters(
    '../../gazebo_drl_env/param/env.yaml')

# load parameters and experiences
if CONTINUE_TRAIN is True:
    model.load_models()
    model.copy_weights()
    #model.load_buffer()

losses = []
model.noise.reset()
succeed_time = 0
crash_time = 0
max_test_success_time = 0
max_total_reward = -99999

# time profiling
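
For reference, one possible shape for the utils.get_parameters helper used above, assuming it simply reads three named reward constants out of the environment YAML file; the key names are hypothetical:

import yaml

def get_parameters(yaml_path):
    with open(yaml_path) as f:
        cfg = yaml.safe_load(f)
    # hypothetical keys; the real env.yaml may use different names
    return cfg['terminal_reward'], cfg['collision_reward'], cfg['survive_reward']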
コード例 #23
0
def main():
    
    
    #LOAD CONFIGS################################################################
    args = get_args()
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_no
    
    log_format = '[%(asctime)s] %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO,
        format=log_format, datefmt='%d %I:%M:%S')
    t = time.time()
    local_time = time.localtime(t)
    if not os.path.exists('./log'):
        os.mkdir('./log')
    fh = logging.FileHandler(os.path.join('log/train-{}{:02}{}'.format(local_time.tm_year % 2000, local_time.tm_mon, t)))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)
    
    use_gpu = False
    if torch.cuda.is_available():
        use_gpu = True
    
#     cudnn.enabled=True
    torch.cuda.manual_seed(args.rand_seed)  # the seed should be an int, not a string
    random.seed(args.rand_seed)
    #LOAD DATA###################################################################
    def convert_param(original_lists):
      ctype, value = original_lists[0], original_lists[1]
      is_list = isinstance(value, list)
      if not is_list: value = [value]
      outs = []
      for x in value:
        if ctype == 'int':
          x = int(x)
        elif ctype == 'str':
          x = str(x)
        elif ctype == 'bool':
          x = bool(int(x))
        elif ctype == 'float':
          x = float(x)
        elif ctype == 'none':
          if x.lower() != 'none':
            raise ValueError('For the none type, the value must be none instead of {:}'.format(x))
          x = None
        else:
          raise TypeError('Does not know this type : {:}'.format(ctype))
        outs.append(x)
      if not is_list: outs = outs[0]
      return outs

    if args.dataset == 'cifar100':
        mean = [x / 255 for x in [129.3, 124.1, 112.4]]
        std  = [x / 255 for x in [68.2, 65.4, 70.4]]
        lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(), transforms.Normalize(mean, std)]
        transform_train = transforms.Compose(lists)
        transform_test  = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])

        with open('../data/cifar-split.txt', 'r') as f:
            data = json.load(f)
            content = { k: convert_param(v) for k,v in data.items()}
            Arguments = namedtuple('Configure', ' '.join(content.keys()))
            content   = Arguments(**content)

        cifar_split = content
        train_split, valid_split = cifar_split.train, cifar_split.valid
    
        print(len(train_split),len(valid_split))
    
        train_dataset = datasets.CIFAR100(root='../data', train=True, download=True, transform=transform_train)
    
    
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=False,sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split),
            num_workers=4, pin_memory=use_gpu)

        train_dataprovider = DataIterator(train_loader)

        val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100(root='../data', train=True, download=True, transform=transform_test),
            batch_size=250, shuffle=False, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split),
            num_workers=4, pin_memory=use_gpu
        )

        val_dataprovider = DataIterator(val_loader)
        print('load data successfully')
        CLASS = 100
    elif args.dataset == 'cifar10':
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std  = [x / 255 for x in [63.0, 62.1, 66.7]]
        lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(), transforms.Normalize(mean, std)]
        transform_train = transforms.Compose(lists)
        transform_test  = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
        with open('../data/cifar-split.txt', 'r') as f:
            data = json.load(f)
            content = { k: convert_param(v) for k,v in data.items()}
            Arguments = namedtuple('Configure', ' '.join(content.keys()))
            content   = Arguments(**content)

        cifar_split = content
        train_split, valid_split = cifar_split.train, cifar_split.valid

        print(len(train_split),len(valid_split))

        train_dataset = datasets.CIFAR10(root='../data', train=True, download=True, transform=transform_train)


        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=False,sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split),
            num_workers=4, pin_memory=use_gpu)

        train_dataprovider = DataIterator(train_loader)

        val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(root='../data', train=True, download=True, transform=transform_test),
            batch_size=250, shuffle=False, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split),
            num_workers=4, pin_memory=use_gpu
        )

        val_dataprovider = DataIterator(val_loader)
        print('load data successfully')
        CLASS = 10
    elif args.dataset == 'image16':
        mean = [x / 255 for x in [122.68, 116.66, 104.01]]
        std  = [x / 255 for x in [63.22,  61.26 , 65.09]]
        transform_test  = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
        with open('../data/ImageNet16-120-split.txt', 'r') as f:
            data = json.load(f)
            content = { k: convert_param(v) for k,v in data.items()}
            Arguments = namedtuple('Configure', ' '.join(content.keys()))
            content   = Arguments(**content)
        img_split = content
        train_split, valid_split = img_split.train, img_split.valid
        train_split = train_split[:len(train_split)//args.batch_size*args.batch_size]
        valid_split = valid_split[:len(valid_split)//250*250]
        print(len(train_split),len(valid_split))
        train_dataset = ImageNet16('../data', True, transform_test, 120)
        test_dataset  = ImageNet16('../data', True, transform_test, 120)
    
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=False,sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split),
            num_workers=4, pin_memory=use_gpu)

        train_dataprovider = DataIterator(train_loader)

        val_loader = torch.utils.data.DataLoader(
                test_dataset,
            batch_size=250, shuffle=False, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split),
            num_workers=4, pin_memory=use_gpu
        )

        val_dataprovider = DataIterator(val_loader)
        
        print('load data successfully')
        CLASS = 120
        
    print(CLASS)
    print(args.init_channels,args.stacks//3)
    model = TinyNetwork(C=args.init_channels,N=args.stacks//3,max_nodes = 4, num_classes = CLASS, search_space = NAS_BENCH_201, affine = False, track_running_stats = False).cuda()
    
    
    optimizer = torch.optim.SGD(get_parameters(model),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    
    criterion_smooth = CrossEntropyLabelSmooth(CLASS, 0.1)
    
    if use_gpu:

        loss_function = criterion_smooth.cuda()
        device = torch.device("cuda" )
        
    else:
        
        loss_function = criterion_smooth
        device = torch.device("cpu")
        
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,args.total_iters)
    model = model.to(device)
    
    all_iters = 0
    if args.auto_continue:
        lastest_model, iters = get_lastest_model()
        if lastest_model is not None:
            all_iters = iters
            checkpoint = torch.load(lastest_model, map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint['state_dict'], strict=True)
            print('load from checkpoint')
            for i in range(iters):
                scheduler.step()

    args.optimizer = optimizer
    args.loss_function = loss_function
    args.scheduler = scheduler
    args.train_dataprovider = train_dataprovider
    args.val_dataprovider = val_dataprovider
    
    args.evo_controller = evolutionary(args.max_population,args.select_number, args.mutation_len,args.mutation_number,args.p_opwise,args.evo_momentum)
    path = './record_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(args.dataset,args.stacks,args.init_channels,args.total_iters,args.warmup_iters,args.max_population,args.select_number,args.mutation_len,args.mutation_number,args.val_interval,args.p_opwise,args.evo_momentum,args.rand_seed)
    
    logging.info(path)
    
    model.current_N = 1
    
    while all_iters < args.total_iters:
        
        if all_iters in [15000, 30000, 45000, 60000]:
            # debug alternative: milestones [50, 100, 150, 200]
            model.current_N += 1
        
        if all_iters > 1 and all_iters % args.val_interval == 0:
            results = []
            for structure_father in args.evo_controller.group:
                results.append([structure_father.structure,structure_father.loss,structure_father.count])
            if not os.path.exists(path):
                os.mkdir(path)

            with open(path + '/%06d-ep.txt' % all_iters, 'w') as tt:
                json.dump(results, tt)

            if all_iters >= args.warmup_iters:  # warmup
                args.evo_controller.select()

        all_iters = train(model, device, args, val_interval=args.val_interval, bn_process=False, all_iters=all_iters)
        validate(model, device, args, all_iters=all_iters)
        
    results = []
    for structure_father in args.evo_controller.group:
        results.append([structure_father.structure,structure_father.loss,structure_father.count])
    with open(path + '/%06d-ep.txt'%all_iters,'w') as tt:
        json.dump(results,tt)
コード例 #24
0
def main(args):

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    model = args.model
    params = get_parameters(model)
    params['device'] = torch.device("cuda:0" if args.cuda else "cpu")

    print(params['device'])

    dpmm = NeuralClustering(params).to(params['device'])
    data_generator = get_generator(params)

    #define containers to collect statistics
    losses = []  # NLLs
    accs = []  # Accuracy of the classification prediction
    perm_vars = []  # permutation variance

    it = 0  # iteration counter
    learning_rate = 1e-4
    weight_decay = 0.01
    optimizer = torch.optim.Adam(dpmm.parameters(),
                                 lr=learning_rate,
                                 weight_decay=weight_decay)

    perms = 6  # Number of permutations for each mini-batch.
    # In each permutation, the order of the datapoints is shuffled.

    batch_size = args.batch_size
    max_it = args.iterations

    if params['model'] == 'Gauss2D':
        if not os.path.isdir('saved_models/Gauss2D'):
            os.makedirs('saved_models/Gauss2D')
        if not os.path.isdir('figures/Gauss2D'):
            os.makedirs('figures/Gauss2D')

    elif params['model'] == 'MNIST':
        if not os.path.isdir('saved_models/MNIST'):
            os.makedirs('saved_models/MNIST')
        if not os.path.isdir('figures/MNIST'):
            os.makedirs('figures/MNIST')

    end_name = params['model']
    learning_rates = {1200: 5e-5, 2200: 1e-5}

    t_start = time.time()
    itt = it
    while True:

        it += 1

        if it == max_it:
            break

        dpmm.train()

        if it % args.plot_interval == 0:

            torch.cuda.empty_cache()
            plot_avgs(losses,
                      accs,
                      perm_vars,
                      50,
                      save_name='./figures/train_avgs_' + end_name + '.pdf')

            if params['model'] == 'Gauss2D':
                fig_name = './figures/Gauss2D/samples_2D_' + str(it) + '.pdf'
                print('\nCreating plot at ' + fig_name + '\n')
                plot_samples_2d(dpmm,
                                data_generator,
                                N=100,
                                seed=it,
                                save_name=fig_name)

            elif params['model'] == 'MNIST':
                fig_name = './figures/MNIST/samples_MNIST_' + str(it) + '.pdf'
                print('\nCreating plot at ' + fig_name + '\n')
                plot_samples_MNIST(dpmm,
                                   data_generator,
                                   N=20,
                                   seed=it,
                                   save_name=fig_name)

        if it % 100 == 0:
            if 'fname' in vars():
                os.remove(fname)
            dpmm.params['it'] = it
            fname = 'saved_models/' + end_name + '/' + end_name + '_' + str(
                it) + '.pt'
            torch.save(dpmm, fname)

        if it in learning_rates:
            optimizer = torch.optim.Adam(dpmm.parameters(),
                                         lr=learning_rates[it],
                                         weight_decay=weight_decay)

        data, cs, clusters, K = data_generator.generate(None, batch_size)
        N = data.shape[1]

        loss_values = np.zeros(perms)
        accuracies = np.zeros([N - 1, perms])

        # The memory requirements change in each iteration according to the random values of N and K.
        # If both N and K are big, an out of memory RuntimeError exception might be raised.
        # When this happens, we capture the exception, reduce the batch_size to 3/4 of its value, and try again.

        while True:
            try:

                loss = 0

                for perm in range(perms):
                    arr = np.arange(N)
                    np.random.shuffle(
                        arr
                    )  # permute the order in which the points are queried
                    cs = cs[arr]
                    data = data[:, arr, :]

                    cs = relabel(
                        cs
                    )  # this makes cluster labels appear in cs[] in increasing order

                    this_loss = 0
                    dpmm.previous_n = 0

                    for n in range(1, N):
                        # points up to (n-1) are already assigned, the point n is to be assigned

                        logprobs = dpmm(data, cs, n)
                        c = cs[n]
                        accuracies[n - 1, perm] = np.sum(
                            np.argmax(logprobs.detach().to('cpu').numpy(),
                                      axis=1) == c) / logprobs.shape[0]

                        this_loss -= logprobs[:, c].mean()

                    this_loss.backward(
                    )  # this accumulates the gradients for each permutation
                    loss_values[perm] = this_loss.item() / N
                    loss += this_loss

                perm_vars.append(loss_values.var())
                losses.append(loss.item() / N)
                accs.append(accuracies.mean())

                optimizer.step(
                )  # the gradients used in this step are the sum of the gradients for each permutation
                optimizer.zero_grad()


                print('{0:4d}  N:{1:2d}  K:{2}  Mean NLL:{3:.3f}   Mean Acc:{4:.3f}   Mean Permutation Variance: {5:.7f}  Mean Time/Iteration: {6:.1f}'\
                      .format(it, N, K , np.mean(losses[-50:]), np.mean(accs[-50:]), np.mean(perm_vars[-50:]), (time.time()-t_start)/(it - itt)    ))

                break

            except RuntimeError:
                bsize = int(.75 * data.shape[0])
                if bsize > 2:
                    print('RuntimeError handled  ', 'N:', N, ' K:', K,
                          'Trying batch size:', bsize)
                    data = data[:bsize, :, :]
                else:
                    break
コード例 #25
0
File: default.py  Project: Rassixx/xbmc-pneumatic
if __name__ == "__main__":
    log('v%s started' % __settings__.getAddonInfo("version"), xbmc.LOGNOTICE)
    HANDLE = int(sys.argv[1])
    if not (__settings__.getSetting("firstrun")):
        __settings__.openSettings()
        #TODO fix this
        if utils.pass_setup_test(sabnzbd.self_test(), __settings__.getSetting("sabnzbd_incomplete")):
            __settings__.setSetting("firstrun", '1')
    else:
        if (not sys.argv[2]):
            add_posts({'title':'Incomplete'}, '', MODE_INCOMPLETE)
            add_posts({'title':'Browse local NZB\'s'}, '', MODE_LOCAL, '', '')
            xbmcplugin.setContent(HANDLE, 'movies')
            xbmcplugin.endOfDirectory(HANDLE, succeeded=True, cacheToDisc=True)
        else:
            params = utils.get_parameters(sys.argv[2])
            get = params.get
            if get("mode")== MODE_PLAY:
                is_home, sab_nzo_id = is_nzb_home(params)
                if is_home:
                    nzbname = utils.unquote_plus(get("nzbname"))
                    pre_play(nzbname, nzo=sab_nzo_id)
            if get("mode")== MODE_LIST_PLAY or get("mode")== MODE_AUTO_PLAY:
                play_video(params)
            if get("mode")== MODE_DELETE:
                delete(params)
            if get("mode")== MODE_DOWNLOAD:
                download(params)
            if get("mode")== MODE_REPAIR:
                repair(params)
            if get("mode")== MODE_INCOMPLETE:
コード例 #26
0
def get_cand_err(model, cand, args):
    global train_dataprovider, val_dataprovider

    if train_dataprovider is None:
        use_gpu = torch.cuda.is_available()
        train_dataprovider = get_train_dataprovider(args.train_batch_size,
                                                    use_gpu=use_gpu,
                                                    num_workers=32)
        val_dataprovider = get_val_dataprovider(args.test_batch_size,
                                                use_gpu=use_gpu,
                                                num_workers=32)

    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    max_train_iters = args.max_train_iters
    max_test_iters = args.max_test_iters

    print('clear bn statistics....')  # note: the actual BN reset below is commented out
    # for m in model.modules():
    #     if isinstance(m, torch.nn.BatchNorm2d):
    #         m.running_mean = torch.zeros_like(m.running_mean)
    #         m.running_var = torch.ones_like(m.running_var)

    print('train bn with training set (BN sanitize) ....')
    # meta_model = ShuffleNetV2_OneShot()
    # meta_model = nn.DataParallel(meta_model)
    # meta_model = meta_model.to(device)
    # for p, q in zip(model.parameters(), meta_model.parameters()):
    #     if p is not None:
    #         q = p.clone()

    optimizer = torch.optim.SGD(get_parameters(model), lr=0.001)
    criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)
    loss_function = criterion_smooth.to(device)  # keep the loss on the same device as the model
    model.train()

    for step in tqdm.tqdm(range(max_train_iters)):
        # print('train step: {} total: {}'.format(step,max_train_iters))
        data, target = train_dataprovider.next()
        # print('get data',data.shape)

        target = target.type(torch.LongTensor)

        data, target = data.to(device), target.to(device)

        # print(type(data))
        # data = data.requires_grad_()
        # data = torch.tensor(data.data, requires_grad=True)
        # data = data.cuda()
        # # target.requires_grad=True
        output = model(data, cand)

        # if step<10:
        #     loss = loss_function(output, target)
        #     optimizer.zero_grad()
        #     loss.backward()
        #     optimizer.step()

        del data, target, output

    top1 = 0
    top5 = 0
    total = 0

    print('starting test....')
    model.eval()

    for step in tqdm.tqdm(range(max_test_iters)):
        # print('test step: {} total: {}'.format(step,max_test_iters))
        data, target = val_dataprovider.next()
        batchsize = data.shape[0]
        # print('get data',data.shape)
        target = target.type(torch.LongTensor)
        data, target = data.to(device), target.to(device)

        logits = model(data, cand)

        prec1, prec5 = accuracy(logits, target, topk=(1, 5))

        # print(prec1.item(),prec5.item())

        top1 += prec1.item() * batchsize
        top5 += prec5.item() * batchsize
        total += batchsize

        del data, target, logits, prec1, prec5

    top1, top5 = top1 / total, top5 / total

    top1, top5 = 1 - top1 / 100, 1 - top5 / 100

    print('top1: {:.2f} top5: {:.2f}'.format(top1 * 100, top5 * 100))

    return top1, top5
コード例 #27
0
def cross_fold(model_generator,
               frac,
               epochs,
               K,
               optimiser_generator,
               scheduler_generator,
               step_every,
               output_to_std=False):

    means = None
    sds = None
    times = None
    for k in range(K):
        train_set = df.sample(frac=frac)
        test_set = df.drop(train_set.index)

        parameters = utils.get_parameters()

        train_y = train_set.iloc[:, -len(parameters):]
        train_x = train_set.iloc[:, :-len(parameters)]

        test_y = test_set.iloc[:, -len(parameters):]
        test_x = test_set.iloc[:, :-len(parameters)]

        train_data_set = FingerprintDataset(train_x, train_y)
        test_data_set = FingerprintDataset(test_x, test_y)

        train_loader = DataLoader(train_data_set, batch_size=64, shuffle=True)

        test_loader = DataLoader(test_data_set, batch_size=64, shuffle=False)

        network = model_generator()
        optimiser = optimiser_generator(network)
        scheduler = scheduler_generator(
            optimiser) if scheduler_generator is not None else None

        for epoch in range(epochs):
            epoch_loss = 0
            for batch in train_loader:

                optimiser.zero_grad()

                loss_func = torch.nn.MSELoss()

                loss = loss_func(network(batch['fingerprint']),
                                 batch['parameters'].float())

                loss.backward()
                optimiser.step()

                epoch_loss += loss.item()

            if scheduler is not None and epoch % step_every == 0 and epoch != 0:
                scheduler.step()

            if output_to_std:
                print('Epoch [%d/%d]  loss %f' %
                      (epoch, epochs, epoch_loss / len(train_loader)))

        validation_loss = None

        total_time = 0
        with torch.no_grad():
            for batch in test_loader:

                start = time.perf_counter()
                predicted_values = network(batch['fingerprint'])
                end = time.perf_counter()
                total_time += end - start

                expected_values = batch['parameters']

                percentage_error = torch.div(
                    torch.abs(expected_values - predicted_values),
                    expected_values)

                if validation_loss is None:
                    validation_loss = percentage_error
                else:
                    validation_loss = torch.cat(
                        [validation_loss, percentage_error])

        average_time = total_time / len(test_loader)

        if means is None and sds is None:
            means = torch.mean(validation_loss, 0).unsqueeze(0)
            sds = torch.std(validation_loss, 0).unsqueeze(0)
            times = average_time
        else:
            means = torch.cat(
                [means, torch.mean(validation_loss, 0).unsqueeze(0)])
            sds = torch.cat([sds, torch.std(validation_loss, 0).unsqueeze(0)])
            times += average_time

    return (torch.mean(means, 0), torch.mean(sds, 0), torch.tensor(times / K))  # average over all K folds
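
A minimal usage sketch for cross_fold; the network shape, fingerprint length, and hyper-parameters below are placeholders rather than values from the original project:

import torch

n_params = len(utils.get_parameters())  # number of regression targets

def make_model():
    # hypothetical fingerprint-to-parameters regressor (2048-bit fingerprint assumed)
    return torch.nn.Sequential(
        torch.nn.Linear(2048, 256),
        torch.nn.ReLU(),
        torch.nn.Linear(256, n_params))

means, sds, avg_time = cross_fold(
    model_generator=make_model,
    frac=0.8,
    epochs=50,
    K=5,
    optimiser_generator=lambda net: torch.optim.Adam(net.parameters(), lr=1e-3),
    scheduler_generator=lambda opt: torch.optim.lr_scheduler.StepLR(opt, step_size=1, gamma=0.9),
    step_every=10,
    output_to_std=True)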
コード例 #28
0
def main():
    # 0. input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=int, help='GPU device to use', default=0)
    parser.add_argument('-d', '--dataset', help='VOC, CamVid, Cityscapes, SUNRGBD, Custom', default='CamVid')
    parser.add_argument('-dr', '--datasetroot', help='dataset root pth', default='/home/hongkai/PycharmProjects/Datasets')
    parser.add_argument('-dt', '--degradedtrain', help='o, bg, bm, hi, ho, ns, nsp', default='o')
    parser.add_argument('-dv', '--degradedval', help='o, bg, bm, hi, ho, ns, nsp', default='o')
    parser.add_argument('-ds', '--degradedtest', help='o, bg, bm, hi, ho, ns, nsp', default='o')
    parser.add_argument('-c', '--config', type=int, default=1, choices=configurations.keys())
    parser.add_argument('-r', '--resume', help='Checkpoint path')
    args = parser.parse_args()

    gpu = args.gpu
    dataset = args.dataset
    dataset_root = args.datasetroot
    degradedtrain = args.degradedtrain
    degradedval = args.degradedval
    degradedtest = args.degradedtest
    cfg = configurations[args.config]
    out = utils.get_log_dir('fcn8s-atonce', args.config, cfg)
    resume = args.resume

    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset
    root = osp.expanduser(osp.join(dataset_root, dataset))
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    if dataset == 'VOC':
        train_data = datasets.VOCSeg(root, split='train', dataset=degradedtrain, transform=True)
        val_data = datasets.VOCSeg(root, split='val', dataset=degradedval, transform=True)
        test_data = datasets.VOCSeg(root, split='test', dataset=degradedtest, transform=True)
    elif dataset == "CamVid":
        train_data = datasets.CamVidSeg(root, split='train', dataset=degradedtrain, transform=True)
        val_data = datasets.CamVidSeg(root, split='val', dataset=degradedval, transform=True)
        test_data = datasets.CamVidSeg(root, split='test', dataset=degradedtest, transform=True)
    elif dataset == "Cityscapes":
        train_data = datasets.CityscapesSeg(root, split='train', dataset=degradedtrain, transform=True)
        val_data = datasets.CityscapesSeg(root, split='val', dataset=degradedval, transform=True)
        test_data = datasets.CityscapesSeg(root, split='test', dataset=degradedtest, transform=True)
    elif dataset == "Custom":
        train_data = datasets.CustomSeg(root, split='train', dataset=degradedtrain, transform=True)
        val_data = datasets.CustomSeg(root, split='val', dataset=degradedval, transform=True)
        test_data = datasets.CustomSeg(root, split='test', dataset=degradedtest, transform=True)
    else:
        train_data = datasets.SUNSeg(root, split='train', dataset=degradedtrain, transform=True)
        val_data = datasets.SUNSeg(root, split='val', dataset=degradedval, transform=True)
        test_data = datasets.SUNSeg(root, split='test', dataset=degradedtest, transform=True)

    train_loader = torch.utils.data.DataLoader(train_data, batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=1, shuffle=False, **kwargs)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False, **kwargs)

    # 2. model
    model = models.FCN8sAtOnce(n_class=train_data.n_classes)
    start_epoch = 0
    start_iteration = 0
    if resume:
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)
    device = torch.device("cuda" if cuda else "cpu")
    model = model.to(device)

    # 3. optimizer
    optim = torch.optim.SGD(
        [
            {'params': utils.get_parameters(model, bias=False)},
            {'params': utils.get_parameters(model, bias=True),
             'lr': cfg['lr'] * 2, 'weight_decay': 0},
        ],
        lr=cfg['lr'],
        momentum=cfg['momentum'],
        weight_decay=cfg['weight_decay'])
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    # 4. trainer
    trainer = Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        test_loader=test_loader,
        out=out,
        max_iter=cfg['max_iteration'],
        interval_validate=cfg.get('interval_validate', len(train_loader)),
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
コード例 #29
0
def main():
    args = get_args()

    # archLoader
    arch_loader = ArchLoader(args.path)

    # Log
    log_format = '[%(asctime)s] %(message)s'
    logging.basicConfig(stream=sys.stdout,
                        level=logging.INFO,
                        format=log_format,
                        datefmt='%m-%d %I:%M:%S')
    t = time.time()
    local_time = time.localtime(t)
    if not os.path.exists('./log'):
        os.mkdir('./log')
    fh = logging.FileHandler(
        os.path.join('log/train-{}-{:02}-{:02}-{:.3f}'.format(
            local_time.tm_year % 2000, local_time.tm_mon, local_time.tm_mday,
            t)))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)

    use_gpu = False
    if torch.cuda.is_available():
        use_gpu = True

    kwargs = {'num_workers': 4, 'pin_memory': True}

    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        root="./data",
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.Resize(32),
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)

    val_loader = torch.utils.data.DataLoader(datasets.MNIST(
        root="./data",
        train=False,
        transform=transforms.Compose([
            transforms.Resize(32),
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             **kwargs)

    model = mutableResNet20(num_classes=10)
    base_model = copy.deepcopy(model)

    logging.info('load model successfully')

    optimizer = torch.optim.SGD(get_parameters(model),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    criterion_smooth = CrossEntropyLabelSmooth(10, 0.1)  # MNIST has 10 classes, not 1000

    if use_gpu:
        model = nn.DataParallel(model)
        loss_function = criterion_smooth.cuda()
        device = torch.device("cuda")
        base_model.cuda()
    else:
        loss_function = criterion_smooth
        device = torch.device("cpu")

    # scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
    #                                               lambda step: (1.0-step/args.total_iters) if step <= args.total_iters else 0, last_epoch=-1)
    scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=5)
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    #     optimizer, T_max=200)

    model = model.to(device)

    all_iters = 0

    if args.auto_continue:
        lastest_model, iters = get_lastest_model()
        if lastest_model is not None:
            all_iters = iters
            checkpoint = torch.load(lastest_model,
                                    map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint['state_dict'], strict=True)
            logging.info('load from checkpoint')
            for i in range(iters):
                scheduler.step()

    # parameter setup: attach training objects to args
    args.optimizer = optimizer
    args.loss_function = loss_function
    args.scheduler = scheduler
    args.train_loader = train_loader
    args.val_loader = val_loader

    if args.eval:
        if args.eval_resume is not None:
            checkpoint = torch.load(args.eval_resume,
                                    map_location=None if use_gpu else 'cpu')
            model.load_state_dict(checkpoint, strict=True)
            validate(model,
                     device,
                     args,
                     all_iters=all_iters,
                     arch_loader=arch_loader)
        exit(0)

    # warmup weights
    if args.warmup is not None:
        logging.info("begin warmup weights")
        while all_iters < args.warmup:
            all_iters = train_supernet(model,
                                       device,
                                       args,
                                       bn_process=False,
                                       all_iters=all_iters)

        validate(model,
                 device,
                 args,
                 all_iters=all_iters,
                 arch_loader=arch_loader)

    while all_iters < args.total_iters:
        all_iters = train_subnet(model,
                                 base_model,
                                 device,
                                 args,
                                 bn_process=False,
                                 all_iters=all_iters,
                                 arch_loader=arch_loader)
        logging.info("validate iter {}".format(all_iters))

        if all_iters % 9 == 0:
            validate(model,
                     device,
                     args,
                     all_iters=all_iters,
                     arch_loader=arch_loader)

    validate(model, device, args, all_iters=all_iters, arch_loader=arch_loader)