Example #1
File: model.py Project: lxastro/dlx
    def weighted(y_true, y_pred, weights, mask=None):
        # score_array has ndim >= 2
        score_array = fn(y_true, y_pred)
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            mask = K.cast(mask, K.floatX)
            # mask should have the same shape as score_array
            score_array *= mask
            #  the loss per batch should be proportional
            #  to the number of unmasked samples.
            score_array /= K.mean(mask)

        # reduce score_array to 1D
        ndim = K.ndim(score_array)
        for _ in range(ndim-1):
            score_array = K.mean(score_array, axis=-1)

        if weights is not None:
            score_array *= weights
        return K.mean(score_array)
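For context, `weighted` is the inner closure produced by a wrapper such as `weighted_objective`, which the compile() example further below calls with `objectives.get(obj_fn)`. The following is a minimal, self-contained sketch of that wrapping pattern using plain NumPy in place of the backend `K`; only the name `weighted_objective` comes from the project code, everything else is illustrative.

import numpy as np

def weighted_objective(fn):
    # Wrap a per-sample objective `fn` so it accepts optional sample weights
    # and a mask, then reduces the result to a single scalar loss.
    def weighted(y_true, y_pred, weights, mask=None):
        score = fn(y_true, y_pred)            # per-sample scores
        if mask is not None:
            score = score * mask              # zero out masked samples
            score = score / np.mean(mask)     # rescale by the unmasked fraction
        if weights is not None:
            score = score * weights           # per-sample weighting
        return np.mean(score)
    return weighted

# Usage sketch: mean squared error with the second sample masked out.
mse = weighted_objective(lambda t, p: np.mean((t - p) ** 2, axis=-1))
y_true = np.array([[1.0, 0.0], [0.0, 1.0]])
y_pred = np.array([[0.9, 0.1], [0.5, 0.5]])
print(mse(y_true, y_pred, weights=None, mask=np.array([1.0, 0.0])))  # 0.01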
Example #2
def cosine_proximity(y_true, y_pred):
    assert K.ndim(y_true) == 2
    assert K.ndim(y_pred) == 2
    y_true = K.l2_normalize(y_true, axis=1)
    y_pred = K.l2_normalize(y_pred, axis=1)
    return -K.mean(y_true * y_pred, axis=1)
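Note that the final reduction uses the mean rather than the sum over the feature axis, so the returned value is the negated cosine similarity divided by the feature dimension. A NumPy re-statement for illustration only (the name `cosine_proximity_np` is not from the project):

import numpy as np

def cosine_proximity_np(y_true, y_pred):
    # Same computation as the cosine_proximity example above, in NumPy.
    y_true = y_true / np.linalg.norm(y_true, axis=1, keepdims=True)
    y_pred = y_pred / np.linalg.norm(y_pred, axis=1, keepdims=True)
    # mean (not sum) over axis 1: result is -cosine_similarity / dim
    return -np.mean(y_true * y_pred, axis=1)

a = np.array([[3.0, 4.0]])
print(cosine_proximity_np(a, a))    # [-0.5]  i.e. -(cos=1) / dim=2
print(cosine_proximity_np(a, -a))   # [ 0.5]  i.e. -(cos=-1) / dim=2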
Example #3
File: model.py Project: lxastro/dlx
    def compile(self, optimizer, loss_configures, verbose=0):
        '''Configure the learning process.

        # Arguments
            optimizer: str (name of optimizer) or optimizer object.
                See [optimizers](optimizers.md).
            loss_configures: list of tuples:
                (output name, objective function, mask name, weighted, class_mode).
                objective function: the string name of an objective function,
                    a function, or None (for outputs whose loss is not computed).
                weighted: True or False. If True, class_weight or sample_weight
                    must be provided in the same order.
                class_mode: one of "categorical", "binary", None.
        '''
        self.optimizer = optimizers.get(optimizer)
        self.loss_configures = loss_configures
        self.input_order = self.input_names
        self.train_output_order = []
        self.predict_output_order = []
        self.weight_order = []
        self.out_labels = ['loss',]
        
        if verbose:
            print 'input units:', self.input_units
            print 'output units:', self.output_units
            print 'hidden units:', self.hidden_units
        
        units = self.input_units + self.output_units + self.hidden_units
        for unit in units:
            unit.check()
        for unit in units:
            unit.build()
            
        self.params = []
        self.regularizers = []    
        self.constraints = []
        self.updates = []    
        for unit in self.hidden_units:
            pars = unit.get_params()
            self.params += pars[0]
            self.regularizers += pars[1]
            self.constraints += pars[2]
            self.updates += pars[3]
        
        if verbose:
            print 'parameters:', [TU.sp(param) for param in self.params]
            print 'regularizers:', self.regularizers
            print 'constraints:', self.constraints
            print 'updates:', self.updates
         
        outputs_train = []
        outputs_test = []   
        ys_test = [] 
        for output_unit, output_name_list in zip(self.output_units, self.output_names): 
            res_train = standardize_l(output_unit.get_results(train=True))
            if len(output_name_list) != len(res_train):
                raise Exception('Number of outputs (train) does not match number of names (%s)' % str(output_name_list))
            res_test = standardize_l(output_unit.get_results(train=False))
            if len(output_name_list) != len(res_test):
                raise Exception('Number of outputs (test) does not match number of names (%s)' % str(output_name_list))
                      
            outputs_train.append(res_train)
            outputs_test.append(res_test)
            ys_test += res_test
            self.predict_output_order += output_name_list
            
        if verbose:
            print 'output names:', self.output_names
            print 'outputs train:', outputs_train
            print 'outputs test:', outputs_test
            if verbose >= 2:
                for vares, names in zip(outputs_train, self.output_names):
                    for var,name in zip(vares, names):
                        if verbose == 2:
                            print 'output_' + name + ':', TU.sp(var)
                        else:
                            print 'output_' + name, ':'
                            TU.dp(var)
            
            
        def find_output(output_name, train):
            if train:
                outputs = outputs_train
            else:
                outputs = outputs_test
            
            for output_list, output_name_list in zip(outputs, self.output_names): 
                for output, name in zip(output_list, output_name_list):
                    if output_name == name:
                        return output
            raise Exception('Cannot find output "%s".' % output_name)
        
        ys = []
        ys_train = []
        train_accs = []
        test_accs = []
        weights = []
        train_loss = 0.
        test_loss = 0.
        
        for loss_cf in loss_configures:
            # unpack (output name, objective, mask name, weighted, class_mode)
            output_name, obj_fn, mask_name, weighted, class_mode = loss_cf
            
            y_train = find_output(output_name, train=True)
            y_test = find_output(output_name, train=False)
            
            if mask_name:
                mask_train = find_output(mask_name, train=True)
                mask_test = find_output(mask_name, train=False)
            else:
                mask_train = None
                mask_test = None
            
            y = TU.tensor(ndim=K.ndim(y_train), name=output_name)
            ys.append(y)
            
            ys_train.append(y_train)
            self.train_output_order.append(output_name)
            
            if obj_fn:
                weighted_loss = weighted_objective(objectives.get(obj_fn))
                if weighted:
                    # append the full name; `+=` with a string would extend
                    # the list with its individual characters
                    self.weight_order.append(output_name)
                    weight = TU.tensor(1, name=output_name + '_weight')
                    weights.append(weight)
                    train_loss += weighted_loss(y, y_train, weight, mask_train)
                    test_loss += weighted_loss(y, y_test, weight, mask_test)
                else:
                    train_loss += weighted_loss(y, y_train, None, mask_train)
                    test_loss += weighted_loss(y, y_test, None, mask_test)
                
            if class_mode:
                self.out_labels.append('acc_' + output_name)

                if class_mode == "categorical":
                    accuracy_fn = categorical_accuracy
                elif class_mode == "binary":
                    accuracy_fn = binary_accuracy
                else:
                    raise Exception("Invalid class mode: " + str(class_mode))

                weighted_accuracy = weighted_objective(accuracy_fn)
                train_accs.append(weighted_accuracy(y, y_train, None, mask_train))
                test_accs.append(weighted_accuracy(y, y_test, None, mask_test))

        if verbose:
            print 'ys:', ys
            print 'ys_train:', ys_train
            print 'predict output order:', self.predict_output_order
            print 'ys_test:', ys_test
            print 'train output order:', self.train_output_order
            print 'train_accs:', train_accs
            print 'test_accs:', test_accs
            print 'weight:', weights  
        if verbose == 2:
            print 'train_loss:', TU.sp(train_loss)
            print 'test_loss:', TU.sp(test_loss)
        if verbose >= 3:
            print 'train_loss:'
            TU.dp(train_loss)
            print 'test_loss:' 
            TU.dp(test_loss)

        ins = []
        for input_unit in self.input_units:
            ins.append(input_unit.get_variable())
        
        if verbose:
            print 'ins:', ins
           
        train_vars = ins + ys + weights
        test_vars = ins + ys + weights
        
        for r in self.regularizers:
            train_loss = r(train_loss)

        updates = self.optimizer.get_updates(self.params,
                                             self.constraints,
                                             train_loss)
        
        state_updates = self.updates
            
        updates += state_updates
        
        if verbose:
            print 'train_vars:', train_vars
            print 'test_vars:', test_vars

        if verbose:
            print 'updates:'
            for update in updates:
                print update       
        
        self._train = K.function(train_vars, [train_loss], updates=updates)
        self._train_with_acc = K.function(train_vars, [train_loss] + train_accs, updates=updates)
        self._test = K.function(test_vars, [test_loss], updates=state_updates)
        self._test_with_acc = K.function(test_vars, [test_loss] + test_accs, updates=state_updates)
        self._predict = K.function(inputs=ins, outputs=ys_test, updates=state_updates)
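Drawing on the docstring above, a hedged sketch of how compile() might be invoked; the `model` variable, output names, and mask name below are purely illustrative and not taken from the project.

# Assumes `model` is an already-assembled dlx Model with outputs named
# 'softmax_out' and 'aux_out' and a mask output named 'out_mask'.
# Each tuple: (output name, objective, mask name, weighted, class_mode).
loss_configures = [
    ('softmax_out', 'categorical_crossentropy', 'out_mask', False, 'categorical'),
    ('aux_out', 'mean_squared_error', None, True, None),
]
model.compile(optimizer='rmsprop',
              loss_configures=loss_configures,
              verbose=1)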