Code Example #1
File: v1s.py Project: yamins81/dicarlo_reproduction
 def _sphere(self,fvectors):  
 
     # -- Sphere the training data
     #    (we will later use the sphering parameters obtained here to
     #     "sphere" the test data)
     print "sphering data..."
     v_sub = fvectors.mean(axis=0)
     fvectors -= v_sub
     v_div = fvectors.std(axis=0)
     scipy.putmask(v_div, v_div==0, 1)
     fvectors /= v_div
     return fvectors, v_sub, v_div
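The returned v_sub and v_div are the sphering parameters referred to in the comment: the same mean and standard deviation are meant to be reused on held-out data. A minimal usage sketch (these snippets are Python 2-era code), assuming the method sits on the same class and that train_fvectors / test_fvectors are hypothetical 2-D feature arrays:

    # hypothetical usage: sphere the training features, then reuse the parameters on the test features
    train_sphered, v_sub, v_div = self._sphere(train_fvectors)
    test_sphered = (test_fvectors - v_sub) / v_div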
Code Example #2
File: colorconv.py Project: cadieu/v1like
def oppnorm_convert(arr, threshold=0.1):
    #assert(arr.min()>=0 and arr.max()<=1)
    #out = sp.empty_like(arr)
    arr = arr.astype('float32')
    out = sp.empty(arr.shape[:2] + (2, ), dtype='float32')

    print out.shape

    # red-green
    out[:, :, 0] = arr[:, :, 0] - arr[:, :, 1]
    # blue-yellow
    out[:, :, 1] = arr[:, :, 2] - arr[:, :, [0, 1]].min(2)
    # intensity
    denom = arr.max(2)

    mask = denom < threshold  #*denom[:,:,2].mean()

    out[:, :, 0] /= denom
    out[:, :, 1] /= denom

    sp.putmask(out[:, :, 0], mask, 0)
    sp.putmask(out[:, :, 1], mask, 0)

    return out
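A small worked example for oppnorm_convert, assuming sp is scipy (as it is in the v1like codebase); the 1 x 2 test image is illustrative:

    import numpy as np

    # a 1 x 2 image: one bright red pixel and one nearly black pixel
    img = np.array([[[1.0, 0.2, 0.1],
                     [0.05, 0.02, 0.01]]], dtype='float32')
    opp = oppnorm_convert(img)   # shape (1, 2, 2)
    # pixel 0: red-green   = (1.0 - 0.2) / 1.0 = 0.8
    #          blue-yellow = (0.1 - min(1.0, 0.2)) / 1.0 = -0.1
    # pixel 1: max channel 0.05 < threshold, so both opponent values are masked to 0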
Code Example #3
File: colorconv.py Project: aparicio/v1like
def oppnorm_convert(arr, threshold=0.1):
    #assert(arr.min()>=0 and arr.max()<=1)
    #out = sp.empty_like(arr)
    arr = arr.astype('float32')
    out = sp.empty(arr.shape[:2]+(2,), dtype='float32')

    print out.shape

    # red-green
    out[:,:,0] = arr[:,:,0] - arr[:,:,1]
    # blue-yellow
    out[:,:,1] = arr[:,:,2] - arr[:,:,[0,1]].min(2)
    # intensity
    denom = arr.max(2)

    mask = denom < threshold#*denom[:,:,2].mean()
    
    out[:,:,0] /= denom    
    out[:,:,1] /= denom

    sp.putmask(out[:,:,0], mask, 0)
    sp.putmask(out[:,:,1], mask, 0)

    return out
Code Example #4
def get_simfunc_fvector(fdata1, fdata2, simfunc=DEFAULT_SIMFUNC):

    assert simfunc in VALID_SIMFUNCS

    if simfunc == 'diff':
        fvector = fdata1-fdata2

    elif simfunc == 'abs_diff':
        fvector = sp.absolute(fdata1-fdata2)

    elif simfunc == 'sq_diff':
        fvector = (fdata1-fdata2)**2.

    elif simfunc == 'sq_diff_o_sum':
        denom = (fdata1+fdata2)
        denom[denom==0] = 1
        fvector = ((fdata1-fdata2)**2.) / denom

    elif simfunc == 'sqrtabs_diff':
        fvector = sp.sqrt(sp.absolute(fdata1-fdata2))

    elif simfunc == 'mul':
        fvector = fdata1*fdata2

    elif simfunc == 'sqrt_mul':
        fvector = sp.sqrt(fdata1*fdata2)

    elif simfunc == 'sq_add':
        fvector = (fdata1 + fdata2)**2.

    elif simfunc == 'pseudo_AND_soft_range01':
        assert fdata1.min() != fdata1.max()
        fdata1 -= fdata1.min()
        fdata1 /= fdata1.max()
        assert fdata2.min() != fdata2.max()
        fdata2 -= fdata2.min()
        fdata2 /= fdata2.max()
        denom = fdata1 + fdata2
        fvector = 4. * (fdata1 / denom) * (fdata2 / denom)
        sp.putmask(fvector, sp.isnan(fvector), 0)
        sp.putmask(fvector, sp.isinf(fvector), 0)                        

    elif simfunc == 'concat':
        return sp.concatenate((fdata1, fdata2))

    # DDC additions, FWIW:
    elif simfunc == 'normalized_AND_soft':
        fvector = (fdata1 / fdata1.std()) * (fdata2 / fdata2.std())

    elif simfunc == 'normalized_AND_hard_0.5':
        fvector = ((fdata1 / fdata1.std()) * (fdata2 / fdata2.std()) > 0.5)

    elif simfunc == 'pseudo_AND_soft':
        # this is very similar to mul.  I think it may be one "explanation" for why mul is good
        denom = fdata1 + fdata2
        # goes from 1 when fdata1==fdata2, to 0 when they are very different
        fvector = 4. * (fdata1 / denom) * (fdata2 / denom)  
        fvector[sp.isnan(fvector)] = 1 # correct behavior is to have the *result* be one
        fvector[sp.isinf(fvector)] = 1

    elif simfunc == 'pseudo_AND_hard_0.5':
        denom = fdata1 + fdata2
        # goes from 1 when fdata1==fdata2, to 0 when they are very different
        fvector = ( (4. * (fdata1 / denom) * (fdata2 / denom)) > 0.5 )        

    elif simfunc == 'pseudo_AND_hard_0.25':
        denom = fdata1 + fdata2
        # goes from 1 when fdata1==fdata2, to 0 when they are very different
        fvector = ( (4. * (fdata1 / denom) * (fdata2 / denom)) > 0.25 )

    elif simfunc == 'tmp':
        fvector = fdata1**2. + fdata2**2.

    elif simfunc == 'tmp2':
        fvector = fdata1**2. + fdata1*fdata2 + fdata2**2.

    #elif simfunc == 'pseudo_AND_soft':
    elif simfunc == 'tmp4':
        # this is very similar to mul.  I think it may be one "explanation" for why mul is good
        denom = fdata1 + fdata2
        denom[denom==0] = 1
        # goes from 1 when fdata1==fdata2, to 0 when they are very different
        fvector = 4. * (fdata1 / denom) * (fdata2 / denom)          

    #elif simfunc == 'pseudo_AND_hard_0.5':
    elif simfunc == 'tmp5':        
        denom = fdata1 + fdata2
        denom[denom==0] = 1
        # goes from 1 when fdata1==fdata2, to 0 when they are very different
        fvector = ( (4. * (fdata1 / denom) * (fdata2 / denom)) > 0.5 )

    #elif simfunc == 'pseudo_AND_hard_0.25':
    elif simfunc == 'tmp6':                
        denom = fdata1 + fdata2
        denom[denom==0] = 1
        # goes from 1 when fdata1==fdata2, to 0 when they are very different
        fvector = ( (4. * (fdata1 / denom) * (fdata2 / denom)) > 0.25 )

    elif simfunc == 'tmp7':                
        denom = fdata1 + fdata2
        denom[denom==0] = 1
        # goes from 1 when fdata1==fdata2, to 0 when they are very different
        fvector = ( (4. * (fdata1 / denom) * (fdata2 / denom)) > 0.1 )            
    elif simfunc == 'tmp8':
        #assert fdata1.min() != fdata1.max()
        #fdata1 -= fdata1.min()
        #fdata1 /= fdata1.max()
        #assert fdata2.min() != fdata2.max()
        #fdata2 -= fdata2.min()
        #fdata2 /= fdata2.max()
        denom = fdata1 + fdata2
        fvector = 4. * (fdata1 / denom) * (fdata2 / denom)
        #sp.putmask(fvector, sp.isnan(fvector), 0)
        fvector[sp.isnan(fvector)] = 0
        fvector[sp.isinf(fvector)] = 0
        assert(not sp.isnan(fvector).any())

    elif simfunc == 'tmp10':
        assert fdata1.min() != fdata1.max()
        fdata1 -= fdata1.min()
        fdata1 /= fdata1.max()
        assert fdata2.min() != fdata2.max()
        fdata2 -= fdata2.min()
        fdata2 /= fdata2.max()
        denom = fdata1 + fdata2
        #fvector = 4. * (fdata1 / denom) * (fdata2 / denom)
        fvector = ( (4. * (fdata1 / denom) * (fdata2 / denom)) > 0.25 )
        #sp.putmask(fvector, sp.isnan(fvector), 0)
        fvector[sp.isnan(fvector)] = 0
        fvector[sp.isinf(fvector)] = 0
        assert(not sp.isnan(fvector).any())

    return fvector
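A call sketch for get_simfunc_fvector, assuming sp is scipy/numpy and that DEFAULT_SIMFUNC and VALID_SIMFUNCS are module-level constants defined elsewhere in the same file (they are not shown above); the inputs and the choice of 'abs_diff' are illustrative:

    import numpy as np

    f1 = np.array([0.2, 0.5, 0.9])
    f2 = np.array([0.1, 0.5, 0.0])
    # element-wise absolute difference of the two feature vectors
    fv = get_simfunc_fvector(f1, f2, simfunc='abs_diff')   # -> [0.1, 0.0, 0.9]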
Code Example #5
File: v1like_funcs.py Project: yamins81/ecc
def v1like_norm(hin, conv_mode, kshape, threshold):
    """ V1LIKE local normalization
    
    Each pixel in the input image is divisively normalized by the L2 norm
    of the pixels in a local neighborhood around it, and the result of this
    division is placed in the output image.   
    
    Inputs:
      hin -- a 3-dimensional array (width X height X rgb)
      kshape -- kernel shape (tuple) ex: (3,3) for a 3x3 normalization 
                neighborhood
      threshold -- magnitude threshold, if the vector's length is below 
                   it doesn't get resized ex: 1.    
     
    Outputs:
      hout -- a normalized 3-dimensional array (width X height X rgb)
      
    """
    
    eps = 1e-5
    kh, kw = kshape
    dtype = hin.dtype
    hsrc = hin[:].copy()

    # -- prepare hout
    hin_h, hin_w, hin_d = hin.shape
    hout_h = hin_h# - kh + 1
    hout_w = hin_w# - kw + 1

    if conv_mode != "same":
        hout_h = hout_h - kh + 1
        hout_w = hout_w - kw + 1
        
    hout_d = hin_d    
    hout = N.empty((hout_h, hout_w, hout_d), 'float32')

    # -- compute numerator (hnum) and divisor (hdiv)
    # sum kernel
    hin_d = hin.shape[-1]
    kshape3d = list(kshape) + [hin_d]            
    ker = N.ones(kshape3d, dtype=dtype)
    size = ker.size

    # compute sum-of-square
    hsq = hsrc ** 2.
    #hssq = conv(hsq, ker, conv_mode).astype(dtype)
    kerH = ker[:,0,0][:, None]#, None]
    kerW = ker[0,:,0][None, :]#, None]
    kerD = ker[0,0,:][None, None, :]

    #s = time.time()
    #r = conv(hsq, kerD, 'valid')[:,:,0]
    #print time.time()-s

    #s = time.time()
    hssq = conv(kerH, conv(kerW, conv(hsq, kerD, 'valid')[:,:,0].astype(dtype), conv_mode), conv_mode).astype(dtype)
    hssq = hssq[:,:,None]
    #print time.time()-s

    # compute hnum and hdiv
    ys = kh / 2
    xs = kw / 2
    hout_h, hout_w, hout_d = hout.shape[-3:]
    hs = hout_h
    ws = hout_w
    #hsum = conv(hsrc, ker, conv_mode).astype(dtype)
    hsum = conv(kerH, 
                conv(kerW, 
                     conv(hsrc, 
                          kerD, 'valid')[:,:,0].astype(dtype), 
                     conv_mode),
                conv_mode).astype(dtype)
    hsum = hsum[:,:,None]
    if conv_mode == 'same':
        hnum = hsrc - (hsum/size)
    else:
        hnum = hsrc[ys:ys+hs, xs:xs+ws] - (hsum/size)
    val = (hssq - (hsum**2.)/size)
    val[val<0] = 0
    hdiv = val ** (1./2) + eps

    # -- apply normalization
    # 'volume' threshold
    N.putmask(hdiv, hdiv < (threshold+eps), 1.)
    result = (hnum / hdiv)
    
    #print result.shape
    hout[:] = result
    #print hout.shape, hout.dtype
    return hout
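A call sketch for v1like_norm, assuming N is numpy and conv is a convolution routine such as scipy.signal.convolve, both bound at the top of v1like_funcs.py and not shown above; the input shape is illustrative:

    import numpy as np

    # illustrative height x width x depth stack of filter responses
    responses = np.random.rand(128, 128, 96).astype('float32')
    # per the docstring and the hout allocation above, conv_mode='same'
    # is intended to preserve the 128 x 128 spatial size
    normed = v1like_norm(responses, conv_mode='same', kshape=(3, 3), threshold=1.0)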
Code Example #6
File: v1like_funcs.py Project: yamins81/ecc
def v1s_norm(hin, conv_mode, kshape, threshold):
    """ V1S local normalization
    
    Each pixel in the input image is divisively normalized by the L2 norm
    of the pixels in a local neighborhood around it, and the result of this
    division is placed in the output image.   
    
    Inputs:
      hin -- a 3-dimensional array (width X height X rgb)
      kshape -- kernel shape (tuple) ex: (3,3) for a 3x3 normalization 
                neighborhood
      threshold -- magnitude threshold, if the vector's length is below 
                   it doesn't get resized ex: 1.    
     
    Outputs:
      hout -- a normalized 3-dimensional array (width X height X rgb)
      
    """
    
    eps = 1e-5
    kh, kw = kshape
    dtype = hin.dtype
    hsrc = hin[:].copy()

    # -- prepare hout
    hin_h, hin_w, hin_d = hin.shape
    hout_h = hin_h - kh + 1
    hout_w = hin_w - kw + 1
    hout_d = hin_d    
    hout = N.empty((hout_h, hout_w, hout_d), 'f')

    # -- compute numerator (hnum) and divisor (hdiv)
    # sum kernel
    hin_d = hin.shape[-1]
    kshape3d = list(kshape) + [hin_d]            
    ker = N.ones(kshape3d, dtype=dtype)
    size = ker.size

    # compute sum-of-square
    hsq = hsrc ** 2.
    hssq = conv(hsq, ker, conv_mode).astype(dtype)

    # compute hnum and hdiv
    ys = kh / 2
    xs = kw / 2
    hout_h, hout_w, hout_d = hout.shape[-3:]
    hs = hout_h
    ws = hout_w
    hsum = conv(hsrc, ker, conv_mode).astype(dtype)
    hnum = hsrc[ys:ys+hs, xs:xs+ws] - (hsum/size)
    val = (hssq - (hsum**2.)/size)
    N.putmask(val, val<0, 0) # to avoid negative sqrt
    hdiv = val ** (1./2) + eps

    # -- apply normalization
    # 'volume' threshold
    N.putmask(hdiv, hdiv < (threshold+eps), 1.)
    result = (hnum / hdiv)
    
    hout[:] = result
    return hout
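Unlike v1like_norm above, v1s_norm always allocates an output shrunk by the kernel, (hin - k + 1) in each spatial dimension, which matches a 'valid'-style convolution. A call sketch under that assumption (input shape illustrative):

    import numpy as np

    responses = np.random.rand(128, 128, 96).astype('float32')
    # output spatial size is (128 - 3 + 1) x (128 - 3 + 1) = 126 x 126
    normed = v1s_norm(responses, conv_mode='valid', kshape=(3, 3), threshold=1.0)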
Code Example #7
File: v1like_funcs.py Project: jizhihang/v1like
def v1like_norm2(hin, conv_mode, kshape, threshold):
    """ V1LIKE local normalization

    Each pixel in the input image is divisively normalized by the L2 norm
    of the pixels in a local neighborhood around it, and the result of this
    division is placed in the output image.

    Inputs:
      hin -- a 3-dimensional array (width X height X rgb)
      kshape -- kernel shape (tuple) ex: (3,3) for a 3x3 normalization
                neighborhood
      threshold -- magnitude threshold, if the vector's length is below
                   it doesn't get resized ex: 1.

    Outputs:
      hout -- a normalized 3-dimensional array (width X height X rgb)

    """

    eps = 1e-5
    kh, kw = kshape
    dtype = hin.dtype
    hsrc = hin[:].copy()

    # -- prepare hout
    hin_h, hin_w, hin_d = hin.shape
    hout_h = hin_h  # - kh + 1
    hout_w = hin_w  # - kw + 1

    if conv_mode != "same":
        hout_h = hout_h - kh + 1
        hout_w = hout_w - kw + 1

    hout_d = hin_d
    hout = N.empty((hout_h, hout_w, hout_d), 'float32')

    # -- compute numerator (hnum) and divisor (hdiv)
    # sum kernel
    hin_d = hin.shape[-1]
    kshape3d = list(kshape) + [hin_d]
    ker = N.ones(kshape3d, dtype=dtype)
    size = ker.size

    # compute sum-of-square
    hsq = hsrc**2.
    #hssq = conv(hsq, ker, conv_mode).astype(dtype)
    kerH = ker[:, 0, 0][:, None]  #, None]
    kerW = ker[0, :, 0][None, :]  #, None]
    kerD = ker[0, 0, :][None, None, :]

    #s = time.time()
    #r = conv(hsq, kerD, 'valid')[:,:,0]
    #print time.time()-s

    #s = time.time()
    hssq = conv(
        conv(conv(hsq, kerD, 'valid')[:, :, 0].astype(dtype), kerW, conv_mode),
        kerH, conv_mode).astype(dtype)
    #hssq = conv(kerH,
    #conv(kerW,
    #conv(hsq, kerD, 'valid')[:,:,0].astype(dtype),
    #conv_mode),
    #conv_mode).astype(dtype)
    hssq = hssq[:, :, None]
    #print time.time()-s

    # compute hnum and hdiv
    ys = kh / 2
    xs = kw / 2
    hout_h, hout_w, hout_d = hout.shape[-3:]
    hs = hout_h
    ws = hout_w
    #hsum = conv(hsrc, ker, conv_mode).astype(dtype)
    hsum = conv(
        conv(
            conv(hsrc, kerD, 'valid')[:, :, 0].astype(dtype), kerW, conv_mode),
        kerH, conv_mode).astype(dtype)
    #hsum = conv(kerH,
    #conv(kerW,
    #conv(hsrc,
    #kerD, 'valid')[:,:,0].astype(dtype),
    #conv_mode),
    #conv_mode).astype(dtype)
    hsum = hsum[:, :, None]
    if conv_mode == 'same':
        hnum = hsrc - (hsum / size)
    else:
        hnum = hsrc[ys:ys + hs, xs:xs + ws] - (hsum / size)
    val = (hssq - (hsum**2.) / size)
    val[val < 0] = 0
    hdiv = val**(1. / 2) + eps

    # -- apply normalization
    # 'volume' threshold
    N.putmask(hdiv, hdiv < (threshold + eps), 1.)
    result = (hnum / hdiv)

    #print result.shape
    hout[:] = result
    #print hout.shape, hout.dtype
    return hout
Code Example #8
File: v1like_funcs.py Project: jizhihang/v1like
def v1like_norm(hin, conv_mode, kshape, threshold):
    """ V1S local normalization

    Each pixel in the input image is divisively normalized by the L2 norm
    of the pixels in a local neighborhood around it, and the result of this
    division is placed in the output image.

    Inputs:
      hin -- a 3-dimensional array (width X height X rgb)
      kshape -- kernel shape (tuple) ex: (3,3) for a 3x3 normalization
                neighborhood
      threshold -- magnitude threshold, if the vector's length is below
                   it doesn't get resized ex: 1.

    Outputs:
      hout -- a normalized 3-dimensional array (width X height X rgb)

    """

    eps = 1e-5
    kh, kw = kshape
    dtype = hin.dtype
    hsrc = hin[:].copy()

    # -- prepare hout
    hin_h, hin_w, hin_d = hin.shape
    hout_h = hin_h - kh + 1
    hout_w = hin_w - kw + 1
    hout_d = hin_d
    hout = N.empty((hout_h, hout_w, hout_d), 'f')

    # -- compute numerator (hnum) and divisor (hdiv)
    # sum kernel
    hin_d = hin.shape[-1]
    kshape3d = list(kshape) + [hin_d]
    ker = N.ones(kshape3d, dtype=dtype)
    size = ker.size

    # compute sum-of-square
    hsq = hsrc**2.
    hssq = conv(hsq, ker, conv_mode).astype(dtype)

    # compute hnum and hdiv
    ys = kh / 2
    xs = kw / 2
    hout_h, hout_w, hout_d = hout.shape[-3:]
    hs = hout_h
    ws = hout_w
    hsum = conv(hsrc, ker, conv_mode).astype(dtype)
    hnum = hsrc[ys:ys + hs, xs:xs + ws] - (hsum / size)
    val = (hssq - (hsum**2.) / size)
    N.putmask(val, val < 0, 0)  # to avoid negative sqrt
    hdiv = val**(1. / 2) + eps

    # -- apply normalization
    # 'volume' threshold
    N.putmask(hdiv, hdiv < (threshold + eps), 1.)
    result = (hnum / hdiv)

    hout[:] = result
    return hout
Code Example #9
File: utakata_time_freq.py Project: mackee/utakata
 def binarize(self, target_data, threshold):
   data = sp.maximum(target_data, threshold)
   sp.putmask(data, data>threshold, 1.)
   sp.putmask(data, data<=threshold, 0.)
   self.binarized_data = data
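A usage sketch for binarize, assuming sp is scipy/numpy and that handler is a hypothetical instance of the class this method belongs to; the input array is illustrative:

    import numpy as np

    spectrum = np.random.rand(512, 1024)   # illustrative time-frequency magnitudes in [0, 1]
    handler.binarize(spectrum, threshold=0.5)
    # handler.binarized_data is now 1.0 where spectrum > 0.5 and 0.0 elsewhere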