import os
from collections import OrderedDict

import torch

# Repo-local imports: the module paths below follow the AutoDeeplab codebase layout
# and may need adjusting to match your checkout.
from auto_deeplab import AutoDeeplab
from decoding_formulas import Decoder
from modeling.sync_batchnorm.replicate import patch_replication_callback


class Loader(object):
    def __init__(self, args):
        self.args = args
        if self.args.dataset == 'cityscapes':
            self.nclass = 19
        elif self.args.dataset in ['2d', '3d']:
            self.nclass = 4

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is None:
            raise RuntimeError("No model to decode in resume path: '{}'".format(args.resume))
        if not os.path.isfile(args.resume):
            raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']

        # The search checkpoint stores the architecture parameters directly.
        self._alphas = checkpoint['state_dict']['alphas']
        self._betas = checkpoint['state_dict']['betas']
        self.decoder = Decoder(alphas=self._alphas, betas=self._betas, steps=5)

    def retreive_alphas_betas(self):
        return self._alphas, self._betas

    def decode_architecture(self):
        paths, paths_space = self.decoder.viterbi_decode()
        return paths, paths_space

    def decode_cell(self):
        genotype = self.decoder.genotype_decode()
        return genotype
class Loader(object):
    def __init__(self, args):
        self.args = args
        if self.args.dataset == 'cityscapes':
            self.nclass = 19
        self.model = AutoDeeplab(num_classes=self.nclass, num_layers=12,
                                 filter_multiplier=self.args.filter_multiplier,
                                 block_multiplier=args.block_multiplier, step=args.step)

        # Using cuda
        if args.cuda:
            if torch.cuda.device_count() > 1 or args.load_parallel:
                self.model = torch.nn.DataParallel(self.model.cuda())
                patch_replication_callback(self.model)
            self.model = self.model.cuda()
            print('cuda finished')

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']

            if args.clean_module:
                # The weights were saved from a DataParallel-wrapped model:
                # strip the 'module.' prefix before loading.
                state_dict = checkpoint['state_dict']
                new_state_dict = OrderedDict()
                for k, v in state_dict.items():
                    name = k[7:]  # remove 'module.' of DataParallel
                    new_state_dict[name] = v
                self.model.load_state_dict(new_state_dict)
            else:
                if torch.cuda.device_count() > 1 or args.load_parallel:
                    self.model.module.load_state_dict(checkpoint['state_dict'])
                else:
                    self.model.load_state_dict(checkpoint['state_dict'])

        self.decoder = Decoder(self.model.alphas, self.model.bottom_betas, self.model.betas8,
                               self.model.betas16, self.model.top_betas,
                               args.block_multiplier, args.step)

    def retreive_alphas_betas(self):
        return self.model.alphas, self.model.bottom_betas, self.model.betas8, self.model.betas16, self.model.top_betas

    def decode_architecture(self):
        paths, paths_space = self.decoder.viterbi_decode()
        return paths, paths_space

    def decode_cell(self):
        genotype = self.decoder.genotype_decode()
        return genotype
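# Hedged helper sketch (added for illustration; not part of the original code): the
# clean_module branch above strips the 'module.' prefix that torch.nn.DataParallel
# prepends to parameter names. The same idea as a reusable, standalone function:
def strip_dataparallel_prefix(state_dict, prefix='module.'):
    """Return a copy of `state_dict` with a leading DataParallel prefix removed."""
    cleaned = OrderedDict()
    for key, value in state_dict.items():
        cleaned[key[len(prefix):] if key.startswith(prefix) else key] = value
    return cleaned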
# Model-side decoding variant: builds a Decoder from this model's architecture
# parameters and runs Viterbi decoding over the network-level path.
def decode_viterbi(self):
    decoder = Decoder(self.alphas_d, self.alphas_c, self.betas, 5)
    return decoder.viterbi_decode()
# Alternative variant for models that keep separate beta tensors per resolution
# (bottom/8/16/top) instead of a single betas tensor.
def decode_viterbi(self):
    decoder = Decoder(self.bottom_betas, self.betas8, self.betas16, self.top_betas)
    return decoder.viterbi_decode()
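# Hedged usage sketch (added for illustration; not in the original source). It drives
# the Loader defined above; the checkpoint path is a placeholder and the
# filter_multiplier/block_multiplier/step values are assumptions, so adjust them to
# whatever your search run actually used.
if __name__ == '__main__':
    from argparse import Namespace

    args = Namespace(
        dataset='cityscapes',
        resume='run/cityscapes/checkpoint.pth.tar',  # hypothetical checkpoint path
        cuda=torch.cuda.is_available(),
        load_parallel=False,
        clean_module=False,
        filter_multiplier=8,   # assumed search-time value
        block_multiplier=5,    # assumed search-time value
        step=5,                # assumed search-time value
    )
    loader = Loader(args)                               # uses the last Loader defined above
    paths, paths_space = loader.decode_architecture()   # network-level path via Viterbi decoding
    genotype = loader.decode_cell()                     # cell-level genotype
    print(paths, paths_space, genotype)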