Example #1
    def set_scoring_mode(self, scoring_mode):
        self._init_task_index()
        if (scoring_mode == ScoringMode.OneAndZeros):
            self._pos_mxts = B.set_subtensor(
                               self._pos_mxts[:,self._get_task_index()],
                               self._active)
            self._neg_mxts = B.set_subtensor(
                               self._neg_mxts[:,self._get_task_index()],
                               self._active)
        elif (scoring_mode == ScoringMode.SoftmaxPreActivation):
            #I was getting some weird NoneType errors when I tried
            #to compile this piece of the code, hence the shift to
            #accomplishing this bit via weight normalisation

            #n = self.get_activation_vars().shape[1]
            #self._mxts = B.ones_like(self.get_activation_vars())*(-1.0/n)
            #self._mxts = B.set_subtensor(self._mxts[:,self._get_task_index()],
            #                             (n-1.0)/n)
            raise NotImplementedError(
                                "Do via mean-normalisation of weights "
                                "instead; see what I did in "
                                "models.Model.set_pre_activation_target_layer")
        else:
            raise RuntimeError("Unsupported scoring_mode "+str(scoring_mode))
        self._set_mxts_updated_true()
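To see what the OneAndZeros branch does, here is a minimal NumPy sketch of the column assignment; the shapes and the task_index/active values are hypothetical, and B.set_subtensor is the Theano-backend analogue of the in-place assignment shown:

import numpy as np

#hypothetical multipliers for a batch of 4 examples and 3 output tasks
pos_mxts = np.zeros((4, 3))
task_index = 1   #hypothetical result of self._get_task_index()
active = 1.0     #hypothetical value of self._active

#NumPy analogue of B.set_subtensor(pos_mxts[:, task_index], active):
#only the selected task's column carries a nonzero multiplier
pos_mxts[:, task_index] = active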
Example #2
def get_smoothen_function(window_size, same_size_return=True):
    """
        Returns a function that smooths its input along the last
         axis with a sliding window of size window_size.

        The returned function takes the arguments inp,
         batch_size and progress_update.
    """
    from deeplift import backend as B
    from deeplift.util import run_function_in_batches
    inp_tensor = B.tensor_with_dims(2, "inp_tensor")

    if (same_size_return):
        #pad so that the output has the same size as the input.
        #'valid' pooling yields an output of length
        #input_length - (window_size-1), so we pad with int(window_size/2)
        #on each side; for even window_size we trim the extra value
        #off the front of the output later on
        padding = int(window_size/2)
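        #worked example with hypothetical input length L:
        #window_size=5 gives padding=2, padded length L+4, pooled output
        #length (L+4)-(5-1) = L, so the sizes already match;
        #window_size=4 gives padding=2, padded length L+4, pooled output
        #length (L+4)-(4-1) = L+1, hence the trim below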
        new_dims = [inp_tensor.shape[0], inp_tensor.shape[1]+2*padding]
        padded_inp = B.zeros(new_dims)
        #fill the middle region with the original input
        padded_inp = B.set_subtensor(
                        padded_inp[:,padding:(inp_tensor.shape[1]+padding)],
                        inp_tensor) 
        #duplicate the left end for padding
        padded_inp = B.set_subtensor(padded_inp[:,0:padding],
                                     inp_tensor[:,0:padding])
        #duplicate the right end for padding
        padded_inp = B.set_subtensor(
                        padded_inp[:,(inp_tensor.shape[1]+padding):],
                        inp_tensor[:,(inp_tensor.shape[1]-padding):])
    else:
        padded_inp = inp_tensor
    #add singleton dims so the sequence can go through the 2D pooling op
    padded_inp = padded_inp[:,None,None,:]

    averaged_padded_inp = B.pool2d(
                            inp=padded_inp,
                            pool_size=(1,window_size),
                            strides=(1,1),
                            border_mode="valid",
                            ignore_border=True,
                            pool_mode=B.PoolMode.avg) 

    #if window_size is even, the output has one extra value,
    #so trim it off the front
    if (window_size%2==0 and same_size_return):
        averaged_padded_inp = averaged_padded_inp[:,:,:,1:]

    #drop the singleton dims to recover a 2D (batch, length) output
    averaged_padded_inp = averaged_padded_inp[:,0,0,:]
    smoothen_func = B.function([inp_tensor], averaged_padded_inp)

    def smoothen(inp, batch_size, progress_update=None):
        return run_function_in_batches(
                func=smoothen_func,
                input_data_list=[inp],
                batch_size=batch_size,
                progress_update=progress_update)

    return smoothen
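A minimal usage sketch for the returned function, assuming the Theano-based deeplift backend is available; the shapes, window size, and batch size are hypothetical, and the list returned by run_function_in_batches is stacked into an array for the shape check:

import numpy as np

#hypothetical data: 10 sequences of length 100
data = np.random.rand(10, 100).astype("float32")
smoothen = get_smoothen_function(window_size=5, same_size_return=True)
smoothed = np.array(smoothen(data, batch_size=4))
#with same_size_return=True the smoothed output keeps the input length
assert smoothed.shape == data.shape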