Example no. 1
0
    def _init_model(self):
        """Build the ShellNet search model plus its loss, optimizers and LR schedulers."""
        data_cfg = self.config['data']
        search_cfg = self.config['search']
        self.model = ShellNet(
            in_channels=data_cfg['in_channels'],
            init_node_c=search_cfg['init_node_c'],
            out_channels=data_cfg['out_channels'],
            depth=search_cfg['depth'],
            n_nodes=search_cfg['n_nodes']).to(self.device)
        print('Param size = {:.3f} MB'.format(calc_param_size(self.model.parameters())))
        self.loss = nn.CrossEntropyLoss().to(self.device)

        # Architecture (alpha) weights and kernel weights get separate optimizers,
        # each with its own plateau-driven LR decay.
        self.optim_shell = Adam(self.model.alphas())
        self.optim_kernel = Adam(self.model.kernel.parameters())
        self.shell_scheduler = ReduceLROnPlateau(self.optim_shell, factor=0.5, verbose=True)
        self.kernel_scheduler = ReduceLROnPlateau(self.optim_kernel, factor=0.5, verbose=True)
Example no. 2
0
    def _init_model(self):
        """Set up the ShellNet search model, dice loss, optimizers and schedulers."""
        data_cfg = self.config['data']
        search_cfg = self.config['search']
        self.model = ShellNet(
            in_channels=len(data_cfg['all_mods']),
            init_n_kernels=search_cfg['init_n_kernels'],
            out_channels=len(data_cfg['labels']),
            depth=search_cfg['depth'],
            n_nodes=search_cfg['n_nodes'],
            normal_w_share=search_cfg['normal_w_share'],
            channel_change=search_cfg['channel_change']).to(self.device)
        print('Param size = {:.3f} MB'.format(calc_param_size(self.model)))
        self.loss = WeightedDiceLoss().to(self.device)

        # Architecture (alpha) weights and kernel weights are optimized separately.
        self.optim_shell = Adam(self.model.alphas())  # lr=3e-4
        self.optim_kernel = Adam(self.model.kernel.parameters())
        self.shell_scheduler = ReduceLROnPlateau(self.optim_shell, factor=0.5, verbose=True)
        self.kernel_scheduler = ReduceLROnPlateau(self.optim_kernel, factor=0.5, verbose=True)
Example no. 3
0
    def _init_model(self):
        """Load the searched genotype and build SearchedNet plus training helpers."""
        geno_file = self.config['search']['geno_file']
        # NOTE(review): eval() on pickled file contents executes arbitrary code;
        # safe only if the genotype file is trusted (produced by our own search).
        with open(geno_file, 'rb') as f:
            gene = eval(pickle.load(f)[0])
        data_cfg = self.config['data']
        search_cfg = self.config['search']
        self.model = SearchedNet(in_channels=len(data_cfg['all_mods']),
                                 init_n_kernels=search_cfg['init_n_kernels'],
                                 out_channels=len(data_cfg['labels']),
                                 depth=search_cfg['depth'],
                                 n_nodes=search_cfg['n_nodes'],
                                 channel_change=search_cfg['channel_change'],
                                 gene=gene).to(self.device)
        print('Param size = {:.3f} MB'.format(calc_param_size(self.model)))
        self.loss = WeightedDiceLoss().to(self.device)

        # Single optimizer over all parameters; halve LR when validation plateaus.
        self.optim = Adam(self.model.parameters())
        self.scheduler = ReduceLROnPlateau(self.optim, factor=0.5, verbose=True)
Example no. 4
0
 def _init_model(self):
     """Construct the TF ShellNet search model and its training utilities."""
     data_cfg = self.config['data']
     search_cfg = self.config['search']
     self.model = ShellNet(img_size=data_cfg['img_size'],
                           in_channels=data_cfg['in_channels'],
                           init_node_c=search_cfg['init_node_c'],
                           out_channels=data_cfg['out_channels'],
                           depth=search_cfg['depth'],
                           n_nodes=search_cfg['n_nodes'])
     # One dummy forward pass — presumably forces lazy variable creation
     # before counting parameters (TODO confirm build semantics).
     dummy = np.random.rand(1, 28, 28, 1).astype('float32')
     self.model(dummy, training=True)
     print('Param size = {:.3f} MB'.format(
         calc_param_size(self.model.trainable_variables)))
     # Mean sparse softmax cross-entropy over the batch.
     self.loss = lambda props, y_truth: tf.reduce_mean(
         tf.nn.sparse_softmax_cross_entropy_with_logits(y_truth, props))
     self.optim_shell = Adam()
     self.optim_kernel = Adam()
     self.shell_scheduler = ReduceLROnPlateau(self.optim_shell,
                                              framework='tf')
     self.kernel_scheduler = ReduceLROnPlateau(self.optim_kernel,
                                               framework='tf')
Example no. 5
0
    def _init_model(self):
        """Build SearchedNet from the saved genotype (Paddle fluid version)."""
        geno_file = os.path.join(self.log_path,
                                 self.config['search']['geno_file'])
        # NOTE(review): eval() on pickled contents runs arbitrary code;
        # acceptable only for genotype files we generated ourselves.
        with open(geno_file, 'rb') as f:
            gene = eval(pickle.load(f)[0])
        data_cfg = self.config['data']
        search_cfg = self.config['search']
        self.model = SearchedNet(gene=gene,
                                 in_channels=data_cfg['in_channels'],
                                 init_node_c=search_cfg['init_node_c'],
                                 out_channels=data_cfg['out_channels'],
                                 depth=search_cfg['depth'],
                                 n_nodes=search_cfg['n_nodes'],
                                 drop_rate=self.config['train']['drop_rate'])
        print('Param size = {:.3f} MB'.format(
            calc_param_size(self.model.parameters())))
        # Batch-mean softmax cross-entropy via the fluid layers API.
        self.loss = lambda props, y_truth: fluid.layers.reduce_mean(
            fluid.layers.softmax_with_cross_entropy(props, y_truth))

        self.optim = Adam(parameter_list=self.model.parameters())
        self.scheduler = ReduceLROnPlateau(self.optim)
Example no. 6
0
    def _init_model(self):
        """Rebuild the searched model from its genotype and prepare training."""
        geno_file = os.path.join(self.log_path,
                                 self.config['search']['geno_file'])
        # NOTE(review): eval() on pickled data executes arbitrary code;
        # only load genotype files produced by our own search stage.
        with open(geno_file, 'rb') as f:
            gene = eval(pickle.load(f)[0])
        data_cfg = self.config['data']
        search_cfg = self.config['search']
        self.model = SearchedNet(gene=gene,
                                 in_channels=data_cfg['in_channels'],
                                 init_node_c=search_cfg['init_node_c'],
                                 out_channels=data_cfg['out_channels'],
                                 depth=search_cfg['depth'],
                                 n_nodes=search_cfg['n_nodes'],
                                 drop_rate=self.config['train']['drop_rate']).to(self.device)
        print('Param size = {:.3f} MB'.format(
            calc_param_size(self.model.parameters())))
        self.loss = nn.CrossEntropyLoss().to(self.device)

        # Single optimizer over all parameters; halve LR on validation plateau.
        self.optim = Adam(self.model.parameters())
        self.scheduler = ReduceLROnPlateau(self.optim, factor=0.5, verbose=True)
Example no. 7
0
    def _init_model(self):
        """Rebuild the searched model and restore the best checkpoint for inference."""
        geno_file = os.path.join(self.log_path,
                                 self.config['search']['geno_file'])
        # NOTE(review): eval() on pickled data executes arbitrary code;
        # only load genotype files produced by our own search stage.
        with open(geno_file, 'rb') as f:
            gene = eval(pickle.load(f)[0])
        data_cfg = self.config['data']
        search_cfg = self.config['search']
        self.model = SearchedNet(gene=gene,
                                 in_channels=data_cfg['in_channels'],
                                 init_node_c=search_cfg['init_node_c'],
                                 out_channels=data_cfg['out_channels'],
                                 depth=search_cfg['depth'],
                                 n_nodes=search_cfg['n_nodes'],
                                 drop_rate=self.config['train']['drop_rate']).to(self.device)
        print('Param size = {:.3f} MB'.format(
            calc_param_size(self.model.parameters())))
        self.loss = nn.CrossEntropyLoss().to(self.device)

        # Restore the best checkpoint and switch the network to evaluation mode.
        best_shot = os.path.join(self.log_path, self.config['train']['best_shot'])
        state_dicts = torch.load(best_shot, map_location=self.device)
        self.model.load_state_dict(state_dicts['model_param'])
        self.model.eval()
Example no. 8
0
    def _init_model(self):
        """Build the searched TF model from its genotype and set loss/optimizer."""
        geno_file = os.path.join(self.search_log,
                                 self.config['search']['geno_file'])
        # NOTE(review): eval() on pickled data executes arbitrary code;
        # only load genotype files produced by our own search stage.
        with open(geno_file, 'rb') as f:
            gene = eval(pickle.load(f)[0])
        data_cfg = self.config['data']
        search_cfg = self.config['search']
        self.model = SearchedNet(gene=gene,
                                 img_size=data_cfg['img_size'],
                                 in_channels=data_cfg['in_channels'],
                                 init_node_c=search_cfg['init_node_c'],
                                 out_channels=data_cfg['out_channels'],
                                 depth=search_cfg['depth'],
                                 n_nodes=search_cfg['n_nodes'],
                                 drop_rate=self.config['train']['drop_rate'])
        # One dummy forward pass — presumably forces lazy variable creation
        # before counting parameters (TODO confirm build semantics).
        dummy = np.random.rand(1, 28, 28, 1).astype('float32')
        self.model(dummy, training=True)
        print('Param size = {:.3f} MB'.format(
            calc_param_size(self.model.trainable_variables)))
        # Mean sparse softmax cross-entropy over the batch.
        self.loss = lambda props, y_truth: tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(y_truth, props))

        self.optim = Adam()
        self.scheduler = ReduceLROnPlateau(self.optim, framework='tf')