Example #1
    def tocoo(self, copy=True):
        """Convert this matrix to COOrdinate format.

        When copy=False the data array will be shared between
        this matrix and the resultant coo_matrix.
        """

        M,N = self.shape
        R,C = self.blocksize

        indptr_diff = np.diff(self.indptr)
        if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
            # Check for potential overflow
            indptr_diff_limited = indptr_diff.astype(np.intp)
            if np.any(indptr_diff_limited != indptr_diff):
                raise ValueError("Matrix too big to convert")
            indptr_diff = indptr_diff_limited

        row = (R * np.arange(M//R)).repeat(indptr_diff)
        row = row.repeat(R*C).reshape(-1,R,C)
        row += np.tile(np.arange(R).reshape(-1,1), (1,C))
        row = row.reshape(-1)

        col = (C * self.indices).repeat(R*C).reshape(-1,R,C)
        col += np.tile(np.arange(C), (R,1))
        col = col.reshape(-1)

        data = self.data.reshape(-1)

        if copy:
            data = data.copy()

        from .coo import coo_matrix
        return coo_matrix((data,(row,col)), shape=self.shape)
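A tiny standalone sketch (not from SciPy) of the row/col expansion above, for a single 2x2 block whose block-row is 0 and block-column is 1:

# Illustrative only: reproduce the intra-block offset trick used in tocoo().
import numpy as np

R, C = 2, 2
row = np.zeros((1, R, C), dtype=np.intp)             # block-row origin for each entry
row += np.tile(np.arange(R).reshape(-1, 1), (1, C))  # add intra-block row offsets
col = (C * np.array([1])).repeat(R * C).reshape(-1, R, C)
col += np.tile(np.arange(C), (R, 1))                 # add intra-block column offsets
print(row.reshape(-1))  # [0 0 1 1]
print(col.reshape(-1))  # [2 3 2 3]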
Example #2
    def test_n_dimensional_log_encoding_CanonLog(self):
        """
        Tests :func:`colour.models.rgb.transfer_functions.canon_log.\
log_encoding_CanonLog` definition n-dimensional arrays support.
        """

        L = 0.18
        V = 0.312012855550395
        np.testing.assert_almost_equal(
            log_encoding_CanonLog(L),
            V,
            decimal=7)

        L = np.tile(L, 6)
        V = np.tile(V, 6)
        np.testing.assert_almost_equal(
            log_encoding_CanonLog(L),
            V,
            decimal=7)

        L = np.reshape(L, (2, 3))
        V = np.reshape(V, (2, 3))
        np.testing.assert_almost_equal(
            log_encoding_CanonLog(L),
            V,
            decimal=7)

        L = np.reshape(L, (2, 3, 1))
        V = np.reshape(V, (2, 3, 1))
        np.testing.assert_almost_equal(
            log_encoding_CanonLog(L),
            V,
            decimal=7)
Example #3
 def __set_static_gaus_pmfs(self):
     if not self.off_buff.is_full():
         print("The long term buffer is not yet full. This may give undesirable results")
     
     # median RSS of off-state buffer
     cal_med = self.off_buff.get_no_nan_median()
     
     if np.any(cal_med == 127) or np.any(np.isnan(cal_med)):
         sys.stderr.write('At least one link has a median of 127 or is nan\n\n')
         quit()
          
     if np.sum(np.isnan(self.off_buff.get_nanvar())) > 0:
         sys.stderr.write('The long term buffer has a nan\n')
         quit()
     
     cal_med_mat = np.tile(cal_med,(self.V_mat.shape[1],1)).T
     
     # variance of RSS during calibration
     cal_var = np.maximum(self.off_buff.get_nanvar(),self.omega) #3.0 
     cal_var_mat = np.tile(cal_var,(self.V_mat.shape[1],1)).T
     
     # Compute the off_link emission probabilities for each link
     x = np.exp(- (self.V_mat - cal_med_mat)**2/(2*cal_var_mat/1.0)) # 1.0
     self.off_links = self.__normalize_pmf(x)
     
     # Compute the on_link emission probabilities for each link
     x = np.exp(- (self.V_mat - (cal_med_mat-self.Delta))**2/(self.eta*2*cal_var_mat)) # 3
     self.on_links = self.__normalize_pmf(x) 
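A minimal, self-contained sketch of the tiling pattern above (names and values are illustrative, not from the original class): each link's median and variance are replicated across the RSS-value axis with np.tile, then a Gaussian pmf is evaluated and row-normalized.

import numpy as np

V = np.tile(np.arange(5.0), (3, 1))        # stand-in for self.V_mat: 3 links x 5 RSS values
cal_med = np.array([1.0, 2.0, 3.0])        # per-link medians
cal_var = np.array([1.0, 0.5, 2.0])        # per-link variances

med_mat = np.tile(cal_med, (V.shape[1], 1)).T   # (3, 5): median repeated across columns
var_mat = np.tile(cal_var, (V.shape[1], 1)).T

x = np.exp(-(V - med_mat) ** 2 / (2 * var_mat))
pmf = x / x.sum(axis=1, keepdims=True)          # normalize each link's pmf
print(pmf.sum(axis=1))                          # [1. 1. 1.]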
Example #4
def testSoftmaxMNIST():
    x_, y_ = getData("training_images.gz", "training_labels.gz")
    
    
    N = 600
    
    x = x_[0:N].reshape(N, 784).T/255.0
    y = np.zeros((10, N))

    for i in range(N):
        y[y_[i][0]][i] = 1

    
    #nn1 = SimpleNN(784, 800, 10, 100, 0.15, 0.4, False)
    #nn2 = SimpleNN(784, 800, 10, 1, 0.15, 0.4, False)
    nn3 = Softmax(784, 800, 1, 10, 0.15, 0, False)
    nn4 = Softmax(784, 800, 10, 10, 0.35, 0, False)
    
    #nn1.Train(x, y)
    #nn2.Train(x, y)
    nn3.Train(x, y)
    nn4.Train(x, y)
    
    N = 10000    
    
    x_, y_ = getData("test_images.gz", "test_labels.gz")
    x = x_.reshape(N, 784).T/255.0
    y = y_.T

    correct = np.zeros((4, 1))

    print "Testing"
    startTime = time()
    for i in xrange(N):
        #h1 = nn1.Evaluate(np.tile(x.T[i].T, (1, 1)).T)
        #h2 = nn2.Evaluate(np.tile(x.T[i].T, (1, 1)).T)
        h3 = nn3.Evaluate(np.tile(x.T[i].T, (1, 1)).T)
        h4 = nn4.Evaluate(np.tile(x.T[i].T, (1, 1)).T)

        #if h1[y[0][i]][0] > 0.8:
        #    correct[0][0] += 1

        #if h2[y[0][i]][0] > 0.8:
        #    correct[1][0] += 1

        if h3[y[0][i]][0] > 0.8:
            correct[2][0] += 1

        if h4[y[0][i]][0] > 0.8:
            correct[3][0] += 1

        if i > 0:
            stdout.write("Testing image %d/%d. Time elapsed: %ds. \r" % (i, N, time() - startTime))
            stdout.flush()

    stdout.write("\n")
    #print "Accuracy 1: ", correct[0][0]/10000.0 * 100, "%"
    #print "Accuracy 2: ", correct[1][0]/10000.0 * 100, "%"
    print "Accuracy 3: ", correct[2][0]/10000.0 * 100, "%"
    print "Accuracy 4: ", correct[3][0]/10000.0 * 100, "%"     
Example #5
    def all_shar_trials(nblocks=5, ntargets=8, distance=10):
        '''
        Generates a sequence of 2D (x and z) target pairs with the first target
        always at the origin and a second field indicating the extractor type (always shared)
        '''
        #Make blocks of 80 trials: 
        theta = []
        for i in range(10):
            temp = np.arange(0, 2*np.pi, 2*np.pi/ntargets)
            np.random.shuffle(temp)
            theta = theta + [temp]
        theta = np.hstack(theta)

        #Each target has correct % of private and correct % of shared targets
        trial_type = np.empty(len(theta), dtype='S10')
        trial_type[:] = 'shared'

        #Make Target set: 
        x = distance*np.cos(theta)
        y = np.zeros(len(theta))
        z = distance*np.sin(theta)
        
        pairs = np.zeros([len(theta), 2, 3])
        pairs[:,1,:] = np.vstack([x, y, z]).T

        Pairs = np.tile(pairs, [nblocks, 1, 1])
        Trial_type = np.tile(trial_type, [nblocks])

        #Will yield a tuple where target location is in next_trial[0], trial_type is in next_trial[1]
        return list(zip(Pairs, Trial_type))
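Illustrative check (made-up sizes) of how np.tile replicates the per-block target array and trial-type labels across nblocks:

import numpy as np

pairs = np.zeros([4, 2, 3])                # 4 trials, (start, target), xyz
trial_type = np.array(['shared'] * 4, dtype='S10')

Pairs = np.tile(pairs, [5, 1, 1])          # 5 blocks -> (20, 2, 3)
Trial_type = np.tile(trial_type, [5])      # (20,)
print(Pairs.shape, Trial_type.shape)       # (20, 2, 3) (20,)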
Example #6
    def EVzxVzxT_single(self, Z, A, B, i):

        P = Z.shape[0]
        R = A.shape[1]

        A = np.reshape(A[i, :], [1, R])
        # B=np.reshape(B[i,:],[1,R])

        alpha = self.length_scale * self.length_scale
        S = B[0, 0] * B[0, 0]

        logdetM = pow(2 * S / alpha + 1.0, -R / 2.0)

        res = np.zeros([P, P])

        Sinv = 1 / (2 / alpha + 1 / S)

        ZZt = Z.dot(Z.T)

        E1 = (-0.5 * (1 / alpha) + 0.5 * (1 / alpha) * (1 / alpha) * (Sinv)) * np.tile(ZZt.diagonal(), [P, 1]).T
        E2 = (-0.5 * (1 / S) + 0.5 * (1 / S) * (1 / S) * (Sinv)) * A.dot(A.T)
        E3 = (1 / alpha) * (1 / S) * (Sinv) * Z.dot(A.T) + 0.5 * np.tile(E2, [P, 1])
        E4 = (1 / alpha) * (1 / alpha) * (Sinv) * ZZt

        E3e = np.tile(E3, [1, P]) + np.tile(E3, [1, P]).T
        res = logdetM * np.exp(E1 + E1.T + E4 + E3e)

        return res
Example #7
    def __update_b_vec(self,cur_obs):
        # convert measurement vector into emission probabilities
        # repeat the observation in columns
        cur_obs_mat = np.tile(cur_obs,(self.V_mat.shape[1],1)).T
        masked_mat = cur_obs_mat == self.V_mat

        # Extract the probability of the observation on each link for each state
        p_obs_given_off_link = np.sum(self.off_links*masked_mat,axis=1)
        p_obs_given_on_link  = np.sum(self.on_links*masked_mat,axis=1)

        # replicate the probability of each measurement on each link for each state
        p_obs_mat_off = np.tile(p_obs_given_off_link,(self.num_states,1)).T
        p_obs_mat_on  = np.tile(p_obs_given_on_link,(self.num_states,1)).T

        # Compute emission probabilities
        tmp1 = self.codewords*p_obs_mat_on
        tmp2 = np.logical_not(self.codewords)*p_obs_mat_off
        tmp3 = tmp1 + tmp2
        
        # divide tmp3 into groups of 4.  Multiply and normalize
        prev = np.ones(self.num_states)
        start_mark = 0
        end_mark = 4
        group = end_mark
        while start_mark < self.num_links:
            current = np.prod(tmp3[start_mark:np.minimum(self.num_links,end_mark),:],axis=0)
            current = current/np.sum(current)
            prev = (prev*current)/np.sum(prev*current)
            end_mark += group
            start_mark += group

        # add emission probabilities to the circular buffer
        self.C.add_observation(prev)        
Example #8
    def grad_EVzxVzxT_by_hyper_exact(self, EVzxVzxT_list_this, Z, A, B, hyperno):

        P = Z.shape[0]
        R = Z.shape[1]
        N = A.shape[0]

        if hyperno != 0:
            return EVzxVzxT_list_this * 0

        alpha = self.length_scale * self.length_scale

        I = np.identity(R)
        S = np.diag(B[0, :] * B[0, :])
        Sinv = np.diag(1 / B[0, :] * B[0, :])
        C = I * alpha
        Cinv = I * (1 / alpha)
        CinvSinv = 2 * Cinv + Sinv
        CinvSinv_inv = np.diag(1 / CinvSinv.diagonal())

        dC = self.length_scale * I
        dCinv = -Cinv.dot(dC).dot(Cinv)
        dCinvSinv = 2 * dCinv
        dCinvSinv_inv = -CinvSinv_inv.dot(dCinvSinv).dot(CinvSinv_inv)

        S1 = (
            dCinv
            - dCinv.dot(CinvSinv_inv).dot(Cinv)
            - Cinv.dot(dCinvSinv_inv).dot(Cinv)
            - Cinv.dot(CinvSinv_inv).dot(dCinv)
        )
        S2 = -Sinv.dot(dCinvSinv_inv).dot(Sinv)
        S3 = Sinv.dot(dCinvSinv_inv).dot(Cinv) + Sinv.dot(CinvSinv_inv).dot(dCinv)
        S4 = dCinv.dot(CinvSinv_inv).dot(Cinv) + Cinv.dot(dCinvSinv_inv).dot(Cinv) + Cinv.dot(CinvSinv_inv).dot(dCinv)

        T1s = np.tile(Z.dot(S1).dot(Z.T).diagonal(), [P, 1])
        T1 = np.tile(T1s, [N, 1, 1])
        T2s = T1s.T
        T2 = np.tile(T2s, [N, 1, 1])
        T3 = np.tile(Z.dot(S4).dot(Z.T), [N, 1, 1])
        T4 = np.tile(A.dot(S2).dot(A.T).diagonal(), [P, 1]).T
        T4 = np.expand_dims(T4, axis=2)
        T4 = np.repeat(T4, P, axis=2)
        T5 = A.dot(S3).dot(Z.T)
        T5 = np.expand_dims(T5, axis=2)
        T5 = np.repeat(T5, P, axis=2)
        T6 = np.swapaxes(T5, 1, 2)

        SCinvI = 2 * Cinv.dot(S) + I
        SCinvI_inv = np.diag(1 / SCinvI.diagonal())
        (temp, logDetSCinvI) = np.linalg.slogdet(SCinvI)
        detSCinvI = np.exp(logDetSCinvI)
        dDetSCinvI = -0.5 * np.power(detSCinvI, -0.5) * SCinvI_inv.dot(2 * dCinv).dot(S).trace()

        expTerm = EVzxVzxT_list_this / np.power(detSCinvI, -0.5)

        res = EVzxVzxT_list_this * (-0.5 * T1 - 0.5 * T2 + T3 - 0.5 * T4 + T5 + T6) + dDetSCinvI * expTerm

        res = np.sum(res, axis=0)

        return res
Example #9
    def grad_EVzxVzxT_by_Z(self, EVzxVzxT_list_this, Z, A, B, p, r):

        P = Z.shape[0]
        R = Z.shape[1]
        N = A.shape[0]

        ainv = 1 / (self.length_scale * self.length_scale)
        siginv = 1 / (B[0, 0] * B[0, 0])

        dZthis = np.zeros([1, R])

        dZthis[0, r] = 1

        res1 = -0.5 * (dZthis.dot(Z[p, :]) + Z[p, :].dot(dZthis.T)) * (ainv - ainv * (1 / (siginv + 2 * ainv)) * ainv)

        res2 = np.tile(dZthis.dot(A.T) * (ainv * (1 / (siginv + 2 * ainv)) * siginv), [P, 1])

        res3 = np.tile(dZthis.dot(Z.T) * (ainv * (1 / (siginv + 2 * ainv)) * ainv), [N, 1])

        dZ = np.zeros([N, P, P])

        dZ[:, p, :] += np.float64(res1) + res2.T + res3
        dZ[:, :, p] += np.float64(res1) + res2.T + res3

        # set the diagonal
        # dZ[:,p,p] = dZ[:,p,p]/2.

        res = np.sum(EVzxVzxT_list_this * dZ, axis=0)

        return res
Example #10
    def get_new_columns(self):
        if self.value_columns is None:
            return self.removed_level

        stride = len(self.removed_level)
        width = len(self.value_columns)
        propagator = np.repeat(np.arange(width), stride)
        if isinstance(self.value_columns, MultiIndex):
            new_levels = self.value_columns.levels + [self.removed_level]
            new_names = self.value_columns.names + [self.removed_name]

            new_labels = [lab.take(propagator)
                          for lab in self.value_columns.labels]
            new_labels.append(np.tile(np.arange(stride), width))
        else:
            new_levels = [self.value_columns, self.removed_level]
            new_names = [self.value_columns.name, self.removed_name]

            new_labels = []

            new_labels.append(propagator)
            new_labels.append(np.tile(np.arange(stride), width))

        return MultiIndex(levels=new_levels, labels=new_labels,
                          names=new_names)
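The repeat/tile pair above lays out the unstacked column labels; a tiny standalone illustration with made-up sizes:

import numpy as np

stride, width = 3, 2                              # len(removed_level), len(value_columns)
propagator = np.repeat(np.arange(width), stride)  # outer level: each column spread over the removed level
inner = np.tile(np.arange(stride), width)         # inner level: removed level cycled under every column
print(propagator)  # [0 0 0 1 1 1]
print(inner)       # [0 1 2 0 1 2]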
Example #11
def add_bbox_regression_targets(roidb):
    num_images = len(roidb)
    num_classes = roidb[0]['gt_overlaps'].shape[1]
    for idx in range(num_images):
        rois = roidb[idx]['boxes']
        max_overlaps = roidb[idx]['max_overlaps']
        max_classes = roidb[idx]['max_classes']
        roidb[idx]['bbox_targets'] = _compute_targets(rois, max_overlaps, max_classes)

    # Normalization means/stds are shared across images, so compute them once
    # outside the per-image loop
    means = np.tile(
        np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS), (num_classes, 1))
    stds = np.tile(
        np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS), (num_classes, 1))

    print('means')
    print(means)
    print('stds')
    print(stds)

    print('Normalizing targets')
    for idx in range(num_images):
        targets = roidb[idx]['bbox_targets']
        for cls in range(1, num_classes):
            cls_inds = np.where(targets[:, 0] == cls)[0]
            # column 0 holds the class label; columns 1: hold the regression targets
            roidb[idx]['bbox_targets'][cls_inds, 1:] -= means[cls, :]
            roidb[idx]['bbox_targets'][cls_inds, 1:] /= stds[cls, :]

    return means.ravel(), stds.ravel()
Example #12
    def __init__(self, nrows, ncols):
        self.nrows = nrows
        self.ncols = ncols
        self.num_elements = nrows * ncols

        self.X = np.tile(np.arange(self.ncols, dtype = np.double).reshape((1, self.ncols))*np.sqrt(3),
                         (self.nrows, 1))
        if (self.ncols % 2 == 0):
            self.Y = np.tile(np.arange(2*self.nrows, dtype = np.double).reshape((self.nrows, 2)),
                             (1, self.ncols//2))
        else:
            self.Y = np.tile(np.arange(2*self.nrows, dtype = np.double).reshape((self.nrows, 2)),
                             (1, self.ncols//2+1))
            self.Y = self.Y[:,0:-1]
        self.col = np.tile(np.arange(self.ncols, dtype = np.int32).reshape((1, self.ncols)),
                           (self.nrows, 1))
        self.row = np.tile(np.arange(self.nrows, dtype = np.int32).reshape((self.nrows, 1)),
                           (1, self.ncols))

        #self.Y = self.Y + np.tile(np.asarray([0, 1]),
        #                          (self.nrows, self.ncols/2))

        self.col = self.col.reshape(-1)
        self.row = self.row.reshape(-1)
        self.num = np.arange(self.num_elements, dtype = np.int32).reshape(nrows, ncols)
Example #13
    def test_mean_std_12bit(self):
        # Input 12-bit, with an 8-bit color target
        input_scene = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
        color_target = np.tile(np.arange(256)[:, None, None], (1, 1, 3))

        luts = hm.mean_std_luts(input_scene.astype(np.uint16),
                                color_target.astype(np.uint8))

        np.testing.assert_array_equal(luts[0], luts[1])
        np.testing.assert_array_equal(luts[1], luts[2])

        lut = luts[0]
        assert np.all(lut[:8] == 0)
        assert np.all(lut[-8:] == 4096)
        assert np.diff(lut[8:-8]).min() == 1
        assert np.diff(lut[8:-8]).max() == 2

        # Input 12-bit, with a 12-bit color target
        input_scene = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
        color_target = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))

        luts = hm.mean_std_luts(input_scene.astype(np.uint16),
                                color_target.astype(np.uint16))

        # Should be a 1 to 1 look-up-table...
        np.testing.assert_array_equal(luts[0], np.arange(4097))
Example #14
def _csd_array(x, sfreq, window_fun, eigvals, freq_mask, freq_mask_mt, n_fft,
               mode, mt_adaptive):
    """Calculate Fourier transform using multitaper module.

    The arguments correspond to the values in `compute_csd_epochs` and
    `csd_array`.
    """
    x_mt, _ = _mt_spectra(x, window_fun, sfreq, n_fft)

    if mt_adaptive:
        # Compute adaptive weights
        _, weights = _psd_from_mt_adaptive(x_mt, eigvals, freq_mask,
                                           return_weights=True)
        # Tiling weights so that we can easily use _csd_from_mt()
        weights = weights[:, np.newaxis, :, :]
        weights = np.tile(weights, [1, x_mt.shape[0], 1, 1])
    else:
        # Do not use adaptive weights
        if mode == 'multitaper':
            weights = np.sqrt(eigvals)[np.newaxis, np.newaxis, :, np.newaxis]
        else:
            # Hack so we can sum over axis=-2
            weights = np.array([1.])[:, np.newaxis, np.newaxis, np.newaxis]

    x_mt = x_mt[:, :, freq_mask_mt]

    # Calculating CSD
    # Tiling x_mt so that we can easily use _csd_from_mt()
    x_mt = x_mt[:, np.newaxis, :, :]
    x_mt = np.tile(x_mt, [1, x_mt.shape[0], 1, 1])
    y_mt = np.transpose(x_mt, axes=[1, 0, 2, 3])
    weights_y = np.transpose(weights, axes=[1, 0, 2, 3])
    csds = _csd_from_mt(x_mt, y_mt, weights, weights_y)

    return csds
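The tile-then-transpose pairing used above, reduced to a toy case independent of the MNE internals (illustrative only): replicating along a new axis and transposing yields every (i, j) combination.

import numpy as np

x = np.arange(3)[:, np.newaxis]                         # 3 "signals", 1 sample each
xm = np.tile(x[:, np.newaxis, :], [1, x.shape[0], 1])   # (3, 3, 1): signal i at (i, j)
ym = np.transpose(xm, axes=[1, 0, 2])                   # (3, 3, 1): signal j at (i, j)
pairs = xm * ym                                         # all pairwise products
print(pairs[..., 0])                                    # [[0 0 0] [0 1 2] [0 2 4]]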
Example #15
    def test_001_t(self):
        num_frames = 5
        total_subcarriers = 8
        used_subcarriers = 4
        channel_map = ft.get_channel_map(used_subcarriers, total_subcarriers)
        payload_symbols = 8
        overlap = 4
        num_preamble_symbols = 4

        payload = ft.get_payload(payload_symbols, used_subcarriers)
        frame = ft.get_frame(payload, total_subcarriers, channel_map, payload_symbols, overlap)
        frame = np.tile(frame, num_frames).flatten()
        payload = np.tile(payload, num_frames).flatten()

        # set up fg
        src = blocks.vector_source_c(frame, repeat=False, vlen=total_subcarriers)
        deframer = fbmc.deframer_vcb(used_subcarriers, total_subcarriers, num_preamble_symbols, payload_symbols, overlap, channel_map)
        snk = blocks.vector_sink_b(1)
        self.tb.connect(src, deframer, snk)
        self.tb.run()

        # check data
        res = np.array(snk.data())
        print(res)
        print(payload)

        self.assertTupleEqual(tuple(payload), tuple(res))
Example #16
    def test_append_concat(self):
        rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
        ts = Series(np.random.randn(len(rng)), rng)
        df = DataFrame(np.random.randn(len(rng), 4), index=rng)

        result = ts.append(ts)
        result_df = df.append(df)
        ex_index = DatetimeIndex(np.tile(rng.values, 2))
        tm.assert_index_equal(result.index, ex_index)
        tm.assert_index_equal(result_df.index, ex_index)

        appended = rng.append(rng)
        tm.assert_index_equal(appended, ex_index)

        appended = rng.append([rng, rng])
        ex_index = DatetimeIndex(np.tile(rng.values, 3))
        tm.assert_index_equal(appended, ex_index)

        # different index names
        rng1 = rng.copy()
        rng2 = rng.copy()
        rng1.name = 'foo'
        rng2.name = 'bar'
        assert rng1.append(rng1).name == 'foo'
        assert rng1.append(rng2).name is None
Example #17
def linSVM(new_dset, validF):
    print "loading dataset"
    new_dset = L.RhythmDataset('/Users/Tlacael/NYU/RhythmData/lmd_scalars1x64.pkl',"/Users/Tlacael/NYU/RhythmData/"+new_dset,valid=validF,test=(validF+1)%10, dim=[64,1])
    #get training set
    print "loading training set"
    xAll = [new_dset.get(i[0])[0] for i in new_dset.split_idx['train']]
    xAll = np.concatenate(xAll)
    xAll = xAll.reshape(xAll.shape[0],xAll.shape[2])
    
    #get classes for training set
    print "loading validation set"
    classAll=[np.tile(new_dset.get(i[0])[1],(new_dset.get(i[0])[0].shape[0],)) for i in new_dset.split_idx['train']]
    target=np.concatenate(classAll)

    #get validation set
    xVerify = [new_dset.get(i[0])[0] for i in new_dset.split_idx['valid']]
    xVerify = np.concatenate(xVerify)
    xVerify = xVerify.reshape(xVerify.shape[0],xVerify.shape[2])

    
    classVer=[np.tile(new_dset.get(i[0])[1],(new_dset.get(i[0])[0].shape[0],)) for i in new_dset.split_idx['valid']]
    targetVer=np.concatenate(classVer)


    print "building model"
    svc = svm.SVC(kernel='linear', verbose=True)
    print "fit data"
    svc.fit(xAll,target)

    scre = svc.score(xVerify,targetVer)
    print "score: ", scre
    return scre
Example #18
 def _verifySolve(self, x, y, batch_dims=None):
   for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
     if np_type == np.float32 or np_type == np.complex64:
       tol = 1e-5
     else:
       tol = 1e-12
     for adjoint in False, True:
        if np_type in (np.float32, np.float64):
          a = x.real.astype(np_type)
          b = y.real.astype(np_type)
        else:
          a = x.astype(np_type)
          b = y.astype(np_type)
        a_np = np.conj(np.transpose(a)) if adjoint else a
       if batch_dims is not None:
         a = np.tile(a, batch_dims + [1, 1])
         a_np = np.tile(a_np, batch_dims + [1, 1])
         b = np.tile(b, batch_dims + [1, 1])
       np_ans = np.linalg.solve(a_np, b)
       for use_placeholder in False, True:
         with self.test_session(use_gpu=True) as sess:
           if use_placeholder:
             a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
             b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
             tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
             out = sess.run(tf_ans, {a_ph: a, b_ph: b})
           else:
             tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
             out = tf_ans.eval()
             self.assertEqual(tf_ans.get_shape(), out.shape)
           self.assertEqual(np_ans.shape, out.shape)
           self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
Example #19
def generate_anchors(base_size=16, ratios=None, scales=None):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales w.r.t. a reference window.
    """

    if ratios is None:
        ratios = np.array([0.5, 1, 2])

    if scales is None:
        scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])

    num_anchors = len(ratios) * len(scales)

    # initialize output anchors
    anchors = np.zeros((num_anchors, 4))

    # scale base_size
    anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T

    # compute areas of anchors
    areas = anchors[:, 2] * anchors[:, 3]

    # correct for ratios
    anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))
    anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))

    # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)
    anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
    anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T

    return anchors
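Since generate_anchors above depends only on NumPy, it can be exercised directly; with the default 3 ratios x 3 scales it produces 9 anchors per location:

anchors = generate_anchors(base_size=16)
print(anchors.shape)  # (9, 4), rows are (x1, y1, x2, y2)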
Example #20
    def calculate(self, start_cfg, target_cfg):
        print "Start: ", start_cfg
        print "target: ", target_cfg
        # First we make a relative movement vom [0, 0, 0] to [x, y, z]
        diff = np.subtract(target_cfg[:3], start_cfg)

        # Now we apply our motion profile over the length of that resulting vector
        length = np.sqrt(diff.dot(diff))

        profile = MotionProfileAsync(np.array([4]), np.array([0.8]))

        trajectory_length = profile.calculate(np.array([0]), np.array([length]), 0.01)

        trajectory_length = trajectory_length.reshape((trajectory_length.shape[0],))

        # And convert those back to absolute cartesian coordinates
        trajectory = np.add(
            start_cfg,
            np.multiply(
                diff,
                np.tile(
                    np.divide(trajectory_length, np.tile([length], trajectory_length.shape[0])).reshape(
                        (trajectory_length.shape[0], 1)
                    ),
                    3,
                ),
            ),
        )

        if trajectory.shape[0] != 0:
            trajectory[trajectory.shape[0] - 1] = target_cfg[:3]

        return self.sample_trajectory(trajectory, target_cfg[3:])
Example #21
def hausdorffnorm(A, B):
    '''
    Finds the Hausdorff norm between two matrices A and B.
    INPUTS:
    A: numpy array
    B: numpy array
    OUTPUTS:
    Hausdorff norm between matrices A and B
    '''
    # ensure matrices are 3 dimensional, and shaped conformably
    if len(A.shape) == 1:
        A = np.atleast_2d(A)

    if len(B.shape) == 1:
        B = np.atleast_2d(B)

    A = np.atleast_3d(A)
    B = np.atleast_3d(B)

    x, y, z = B.shape
    A = np.reshape(A, (z, x, y))
    B = np.reshape(B, (z, x, y))

    # find hausdorff norm: starting from A to B
    z, x, y = B.shape
    temp1 = np.tile(np.reshape(B.T, (y, z, x)), (max(A.shape), 1))
    temp2 = np.tile(np.reshape(A.T, (y, x, z)), (1, max(B.shape)))
    D1 = np.min(np.sqrt(np.sum((temp1-temp2)**2, 0)), axis=0)

    # starting from B to A
    temp1 = np.tile(np.reshape(A.T, (y, z, x)), (max(B.shape), 1))
    temp2 = np.tile(np.reshape(B.T, (y, x, z)), (1, max(A.shape)))
    D2 = np.min(np.sqrt(np.sum((temp1-temp2)**2, 0)), axis=0)

    return np.max([D1, D2])
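For plain point sets, the same directed min/max construction can be written with broadcasting instead of tile/reshape; a compact alternative formulation (not the original's layout), shown here for comparison:

import numpy as np

def hausdorff_points(A, B):
    # pairwise Euclidean distances between rows of A and rows of B
    D = np.sqrt(((A[:, None, :] - B[None, :, :]) ** 2).sum(-1))
    return max(D.min(axis=1).max(), D.min(axis=0).max())

A = np.array([[0.0, 0.0], [1.0, 0.0]])
B = np.array([[0.0, 1.0]])
print(hausdorff_points(A, B))  # 1.414..., driven by A's farther point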
Example #22
    def test_003_block_pinching(self):
        n_reps = 1
        n_subcarriers = 8
        n_timeslots = 8
        block_len = n_subcarriers * n_timeslots
        cp_len = 8
        ramp_len = 4
        cs_len = ramp_len * 2
        window_len = get_window_len(cp_len, n_timeslots, n_subcarriers, cs_len)
        window_taps = get_raised_cosine_ramp(ramp_len, window_len)
        data = np.arange(block_len, dtype=complex) + 1
        ref = add_cyclic_starfix(data, cp_len, cs_len)
        ref = pinch_block(ref, window_taps)
        data = np.tile(data, n_reps)
        ref = np.tile(ref, n_reps)
        print "input is: ", len(data), " -> " , len(ref)
        # short_window = np.concatenate((window_taps[0:ramp_len], window_taps[-ramp_len:]))
        prefixer = gfdm.cyclic_prefixer_cc(block_len, cp_len, cs_len, ramp_len, window_taps)
        src = blocks.vector_source_c(data)
        dst = blocks.vector_sink_c()
        self.tb.connect(src, prefixer, dst)
        self.tb.run()

        res = np.array(dst.data())
        print(ref[-10:])
        print(res[-10:])

        self.assertComplexTuplesAlmostEqual(res, ref, 4)
Example #23
def reconstr_freq(center_freq, pts, sweep_up=True, bdwth=1.):
    ''' Reconstruct frequency array.

    Arguments:
    center_freq -- center frequency of each sweep. float or np.array
    pts -- dimension of the frequency array. int
    **sweep_up -- first sweep frequency increases. default True. boolean
    **bdwth -- sweep bandwidth (MHz), default 1. float

    Returns:
    freq -- frequency array, np.array 1D/2D
    '''

    if sweep_up:
        single_band = bdwth * (np.arange(pts)/(pts-1) - 0.5)
    else:
        single_band = bdwth * (0.5 - np.arange(pts)/(pts-1))

    if isinstance(center_freq, np.ndarray):
        freq = np.tile(single_band, (len(center_freq), 1)).transpose() + \
               np.tile(center_freq, (pts, 1))
    else:
        freq = single_band + center_freq

    return freq
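Usage sketch for reconstr_freq above (illustrative values): an array of center frequencies yields a (pts, n_centers) grid, while a scalar yields a 1-D sweep.

import numpy as np

freq_2d = reconstr_freq(np.array([100., 200., 300.]), pts=5)
freq_1d = reconstr_freq(100., pts=5)
print(freq_2d.shape, freq_1d.shape)  # (5, 3) (5,)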
Example #24
    def write_frames(self, length=10, change_frequency=6.0, checker_size=48):
        """Write video frames to file.

        Parameters
        ----------
        length : float
            Length in seconds of the written frames
        change_frequency : float
            Frequency of change in the stimulus in Hz
        checker_size : int
            Number of pixels for each checker field

        """
        # Prepare image
        checkerboard = np.tile(
            np.kron(np.array([[0, 1], [1, 0]]),
                    np.ones((checker_size, checker_size))),
            (checker_size, checker_size))
        checkerboard = checkerboard[:self._frame_size[1], :self._frame_size[0]]
        image = np.tile(checkerboard[:, :, np.newaxis] * 255, (1, 1, 3))

        frame_change = self._fps // change_frequency
        assert frame_change == int(frame_change)

        # Write frames
        for frame_num in range(int(length * self._fps)):
            if frame_num % frame_change == 0:
                image = 255 - image
            self._video_writer.write(image)
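The checkerboard construction above, in isolation with small illustrative sizes: np.kron expands each 0/1 cell into a checker_size x checker_size field, and np.tile repeats that unit across the frame.

import numpy as np

checker_size = 2
unit = np.kron(np.array([[0, 1], [1, 0]]), np.ones((checker_size, checker_size)))
board = np.tile(unit, (3, 3))   # repeat the two-by-two checker unit 3x3 times
print(board.shape)  # (12, 12)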
Example #25
 def trans_param_to_current_array(self, quantity_dict, trans_param,
                                  model='LIF', mcnc_grouping=None,
                                  std=None):
     quantity_array = quantity_dict['quantity_array']
     quantity_rate_array = np.abs(np.gradient(quantity_array)) / DT
     if model == 'LIF':
         current_array = trans_param[0] * quantity_array +\
             trans_param[1] * quantity_rate_array + trans_param[2]
         if std is not None:
             std = 0 if std < 0 else std
             current_array += np.random.normal(
                 loc=0., scale=std, size=quantity_array.shape)
     if model == 'Lesniak':
         trans_param = np.tile(trans_param, (4, 1))
         trans_param[:, :2] = np.multiply(
             trans_param[:, :2].T, mcnc_grouping).T
         quantity_array = np.tile(quantity_array, (mcnc_grouping.size, 1)).T
         quantity_rate_array = np.tile(
             quantity_rate_array, (mcnc_grouping.size, 1)).T
         current_array = np.multiply(quantity_array, trans_param[:, 0]) +\
             np.multiply(quantity_rate_array, trans_param[:, 1]) +\
             np.multiply(np.ones_like(quantity_array), trans_param[:, 2])
         if std is not None:
             std = 0 if std < 0 else std
             current_array += np.random.normal(loc=0., scale=std,
                                               size=quantity_array.shape)
     return current_array
Example #26
def deframesignal(frames,signal_length,frame_length,frame_step,winfunc=lambda x:numpy.ones((x,))):
    '''Transform each frame of the original signal back, presumably to remove the
    correlation introduced by framing.
    Parameters:
    frames: frame matrix returned by the audio2frame function
    signal_length: length of the signal
    frame_length: length of each frame
    frame_step: step between successive frames
    winfunc: window function applied to each frame; by default no window is applied
    '''
    # Round the parameters to integers
    signal_length=round(signal_length) # length of the signal
    frame_length=round(frame_length) # length of each frame
    frames_num=numpy.shape(frames)[0] # total number of frames
    assert numpy.shape(frames)[1]==frame_length,'The "frames" matrix has the wrong size; its number of columns should equal the frame length' # check the frames dimensions
    indices=numpy.tile(numpy.arange(0,frame_length),(frames_num,1))+numpy.tile(numpy.arange(0,frames_num*frame_step,frame_step),(frame_length,1)).T # sample positions of every frame, a frames_num x frame_length index matrix
    indices=numpy.array(indices,dtype=numpy.int32)
    pad_length=(frames_num-1)*frame_step+frame_length # length of the flattened signal
    if signal_length<=0:
        signal_length=pad_length
    recalc_signal=numpy.zeros((pad_length,)) # reconstructed signal
    window_correction=numpy.zeros((pad_length,)) # window overlap correction
    win=winfunc(frame_length)
    for i in range(0,frames_num):
        window_correction[indices[i,:]]=window_correction[indices[i,:]]+win+1e-15 # accumulate the window overlap at each sample (1e-15 avoids division by zero)
        recalc_signal[indices[i,:]]=recalc_signal[indices[i,:]]+frames[i,:] # overlap-add the frames onto the reconstructed signal
    recalc_signal=recalc_signal/window_correction # normalize the reconstruction by the overlap at each sample
    return recalc_signal[0:signal_length] # return the reconstructed signal
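Round-trip sketch for deframesignal above (assuming the frames were produced by plain striding, as an audio2frame-style function would do): overlap-add followed by the overlap normalization recovers the original samples.

import numpy
signal = numpy.arange(10, dtype=float)
frame_length, frame_step = 4, 2
starts = numpy.arange(0, len(signal) - frame_length + 1, frame_step)
frames = numpy.stack([signal[s:s + frame_length] for s in starts])
rec = deframesignal(frames, len(signal), frame_length, frame_step)
print(numpy.allclose(rec, signal))  # True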
Example #27
    def sample_predictive_parameters(self):
        Lext = \
            np.vstack((self.L, np.sqrt(self.eta) * np.random.randn(1, self.dim)))

        # Compute mean and covariance over extended space
        D = ((Lext[:,None,:] - Lext[None,:,:])**2).sum(2)
        Mu = -D + self.b
        Mu_row = np.tile(Mu[-1,:][:,None], (1,self.B))
        Mu_row[-1] = self._self_gaussian.mu
        Mu_col = Mu_row.copy()

        # Mu = np.tile(Mu[:,:,None], (1,1,self.B))
        # for n in xrange(self.N+1):
        #     Mu[n,n,:] = self._self_gaussian.mu

        L = np.linalg.cholesky(self.cov.sigma)
        L_row = np.tile(L[None,:,:], (self.N+1, 1, 1))
        L_row[-1] = np.linalg.cholesky(self._self_gaussian.sigma)
        L_col = L_row.copy()

        # L = np.tile(L[None,None,:,:], (self.N+1, self.N+1, 1, 1))
        # for n in xrange(self.N+1):
        #     L[n,n,:,:] = np.linalg.cholesky(self._self_gaussian.sigma)

        # Mu_row, Mu_col = Mu[-1,:,:], Mu[:,-1,:]
        # L_row, L_col = L[-1,:,:,:], L[:,-1,:,:]
        return Mu_row, Mu_col, L_row, L_col
Example #28
def boxfilter(I, r):
    """Fast box filter implementation.

    Parameters
    ----------
    I:  a single channel/gray image data normalized to [0.0, 1.0]
    r:  window radius

    Return
    -----------
    The filtered image data.
    """
    M, N = I.shape
    dest = np.zeros((M, N))

    # cumulative sum over Y axis
    sumY = np.cumsum(I, axis=0)
    # difference over Y axis
    dest[:r + 1] = sumY[r: 2 * r + 1]
    dest[r + 1:M - r] = sumY[2 * r + 1:] - sumY[:M - 2 * r - 1]
    dest[-r:] = np.tile(sumY[-1], (r, 1)) - sumY[M - 2 * r - 1:M - r - 1]

    # cumulative sum over X axis
    sumX = np.cumsum(dest, axis=1)
    # difference over X axis
    dest[:, :r + 1] = sumX[:, r:2 * r + 1]
    dest[:, r + 1:N - r] = sumX[:, 2 * r + 1:] - sumX[:, :N - 2 * r - 1]
    dest[:, -r:] = np.tile(sumX[:, -1][:, None], (1, r)) - \
        sumX[:, N - 2 * r - 1:N - r - 1]

    return dest
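Cross-check sketch for boxfilter above: the cumulative-sum version equals a naive zero-padded window sum (slow reference, illustrative only).

import numpy as np

def boxfilter_naive(I, r):
    # direct O(M*N*r^2) window sums with windows clipped at the borders
    M, N = I.shape
    out = np.zeros((M, N))
    for i in range(M):
        for j in range(N):
            out[i, j] = I[max(0, i - r):i + r + 1, max(0, j - r):j + r + 1].sum()
    return out

I = np.random.rand(12, 15)
r = 3
print(np.allclose(boxfilter(I, r), boxfilter_naive(I, r)))  # True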
Example #29
 def getPointsForInterpolation(self,EndOfPrdvP,aNrmNow):
     '''
     Find endogenous interpolation points for each asset point and each
     discrete preference shock.
     
     Parameters
     ----------
     EndOfPrdvP : np.array
         Array of end-of-period marginal values.
     aNrmNow : np.array
         Array of end-of-period asset values that yield the marginal values
         in EndOfPrdvP.
         
     Returns
     -------
     c_for_interpolation : np.array
         Consumption points for interpolation.
     m_for_interpolation : np.array
         Corresponding market resource points for interpolation.
     '''
     c_base       = self.uPinv(EndOfPrdvP)
     PrefShkCount = self.PrefShkVals.size
     PrefShk_temp = np.tile(np.reshape(self.PrefShkVals**(1.0/self.CRRA),(PrefShkCount,1)),
                            (1,c_base.size))
     self.cNrmNow = np.tile(c_base,(PrefShkCount,1))*PrefShk_temp
     self.mNrmNow = self.cNrmNow + np.tile(aNrmNow,(PrefShkCount,1))
     
     # Add the bottom point to the c and m arrays
     m_for_interpolation = np.concatenate((self.BoroCnstNat*np.ones((PrefShkCount,1)),
                                           self.mNrmNow),axis=1)
     c_for_interpolation = np.concatenate((np.zeros((PrefShkCount,1)),self.cNrmNow),axis=1)
     return c_for_interpolation,m_for_interpolation
Example #30
 def _verifySolveBatch(self, x, y):
   # Since numpy.linalg.lstsq does not support batch solves, as opposed
   # to numpy.linalg.solve, we just perform this test for a fixed batch size
   # of 2x3.
   for np_type in [np.float32, np.float64]:
     a = np.tile(x.astype(np_type), [2, 3, 1, 1])
     b = np.tile(y.astype(np_type), [2, 3, 1, 1])
     np_ans = np.empty([2, 3, a.shape[-1], b.shape[-1]])
     for dim1 in range(2):
       for dim2 in range(3):
         np_ans[dim1, dim2, :, :], _, _, _ = np.linalg.lstsq(
             a[dim1, dim2, :, :], b[dim1, dim2, :, :])
     for fast in [True, False]:
       with self.test_session():
         tf_ans = tf.batch_matrix_solve_ls(a, b, fast=fast).eval()
       self.assertEqual(np_ans.shape, tf_ans.shape)
       # Check residual norm.
       tf_r = b - BatchMatMul(a, tf_ans)
       tf_r_norm = np.sum(tf_r * tf_r)
       np_r = b - BatchMatMul(a, np_ans)
       np_r_norm = np.sum(np_r * np_r)
       self.assertAllClose(np_r_norm, tf_r_norm)
       # Check solution.
       if fast or a.shape[-2] >= a.shape[-1]:
         # We skip this test for the underdetermined case when using the
         # slow path, because Eigen does not return a minimum norm solution.
         # TODO(rmlarsen): Enable this check for all paths if/when we fix
         # Eigen's solver.
         self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
Example #31
def tsne(X=Math.array([]), no_dims=2, initial_dims=50, perplexity=30.0):
    """Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
	The syntaxis of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""

    # Check inputs
    if X.dtype != "float64":
        print("Error: array X should have type float64.")
        return -1
    #if no_dims.__class__ != "<type 'int'>":			# doesn't work yet!
    #	print "Error: number of dimensions should be an integer.";
    #	return -1;

    # Initialize variables
    X = pca(X, initial_dims)
    (n, d) = X.shape
    max_iter = 1000
    initial_momentum = 0.5
    final_momentum = 0.8
    eta = 500
    min_gain = 0.01
    Y = Math.random.randn(n, no_dims)
    dY = Math.zeros((n, no_dims))
    iY = Math.zeros((n, no_dims))
    gains = Math.ones((n, no_dims))

    # Compute P-values
    P = x2p(X, 1e-5, perplexity)
    P = P + Math.transpose(P)
    P = P / Math.sum(P)
    P = P * 4
    # early exaggeration
    P = Math.maximum(P, 1e-12)

    # Run iterations
    for iter in range(max_iter):

        # Compute pairwise affinities
        sum_Y = Math.sum(Math.square(Y), 1)
        num = 1 / (1 +
                   Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y))
        num[list(range(n)), list(range(n))] = 0
        Q = num / Math.sum(num)
        Q = Math.maximum(Q, 1e-12)

        # Compute gradient
        PQ = P - Q
        for i in range(n):
            dY[i, :] = Math.sum(
                Math.tile(PQ[:, i] * num[:, i],
                          (no_dims, 1)).T * (Y[i, :] - Y), 0)

        # Perform the update
        if iter < 20:
            momentum = initial_momentum
        else:
            momentum = final_momentum
        gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * (
            (dY > 0) == (iY > 0))
        gains[gains < min_gain] = min_gain
        iY = momentum * iY - eta * (gains * dY)
        Y = Y + iY
        Y = Y - Math.tile(Math.mean(Y, 0), (n, 1))

        # Compute current value of cost function
        if (iter + 1) % 10 == 0:
            C = Math.sum(P * Math.log(P / Q))
            print("Iteration ", (iter + 1), ": error is ", C)

        # Stop lying about P-values
        if iter == 100:
            P = P / 4

    # Return solution
    return Y
Example #32
    def __init__(self,
                 latent_dim,
                 Y,
                 inputs=None,
                 emissions=None,
                 px1_mu=None,
                 px1_cov=None,
                 kern=None,
                 Z=None,
                 n_ind_pts=100,
                 mean_fn=None,
                 Q_diag=None,
                 Umu=None,
                 Ucov_chol=None,
                 qx1_mu=None,
                 qx1_cov=None,
                 As=None,
                 bs=None,
                 Ss=None,
                 n_samples=100,
                 batch_size=None,
                 chunking=False,
                 seed=None,
                 parallel_iterations=10,
                 jitter=gp.settings.numerics.jitter_level,
                 name=None):

        super().__init__(latent_dim,
                         Y[0],
                         inputs=None if inputs is None else inputs[0],
                         emissions=emissions,
                         px1_mu=px1_mu,
                         px1_cov=None,
                         kern=kern,
                         Z=Z,
                         n_ind_pts=n_ind_pts,
                         mean_fn=mean_fn,
                         Q_diag=Q_diag,
                         Umu=Umu,
                         Ucov_chol=Ucov_chol,
                         qx1_mu=qx1_mu,
                         qx1_cov=None,
                         As=None,
                         bs=None,
                         Ss=False if Ss is False else None,
                         n_samples=n_samples,
                         seed=seed,
                         parallel_iterations=parallel_iterations,
                         jitter=jitter,
                         name=name)

        self.T = [Y_s.shape[0] for Y_s in Y]
        self.T_tf = tf.constant(self.T, dtype=gp.settings.int_type)
        self.max_T = max(self.T)
        self.sum_T = float(sum(self.T))
        self.n_seq = len(self.T)
        self.batch_size = batch_size
        self.chunking = chunking

        if self.batch_size is None:
            self.Y = ParamList(Y, trainable=False)
        else:
            _Y = np.stack([
                np.concatenate(
                    [Ys, np.zeros((self.max_T - len(Ys), self.obs_dim))])
                for Ys in Y
            ])
            self.Y = Param(_Y, trainable=False)

        if inputs is not None:
            if self.batch_size is None:
                self.inputs = ParamList(inputs, trainable=False)
            else:
                desired_length = self.max_T if self.chunking else self.max_T - 1
                _inputs = [
                    np.concatenate([
                        inputs[s],
                        np.zeros(
                            (desired_length - len(inputs[s]), self.input_dim))
                    ]) for s in range(self.n_seq)
                ]  # pad the inputs
                self.inputs = Param(_inputs, trainable=False)

        if qx1_mu is None:
            self.qx1_mu = Param(np.zeros((self.n_seq, self.latent_dim)))

        self.qx1_cov_chol = Param(
            np.tile(np.eye(self.latent_dim)[None, ...], [self.n_seq, 1, 1])
            if qx1_cov is None else np.linalg.cholesky(qx1_cov),
            transform=gtf.LowerTriangular(self.latent_dim,
                                          num_matrices=self.n_seq))

        _As = [np.ones((T_s - 1, self.latent_dim))
               for T_s in self.T] if As is None else As
        _bs = [np.zeros((T_s - 1, self.latent_dim))
               for T_s in self.T] if bs is None else bs
        if Ss is not False:
            _S_chols = [np.tile(self.Q_sqrt.value.copy()[None, ...], [T_s - 1, 1]) for T_s in self.T] if Ss is None \
                else [np.sqrt(S) if S.ndim == 2 else np.linalg.cholesky(S) for S in Ss]

        if self.batch_size is None:
            self.As = ParamList(_As)
            self.bs = ParamList(_bs)
            if Ss is not False:
                self.S_chols = ParamList([
                    Param(Sc,
                          transform=gtf.positive if Sc.ndim == 2 else
                          gtf.LowerTriangular(self.latent_dim,
                                              num_matrices=Sc.shape[0]))
                    for Sc in _S_chols
                ])
        else:
            _As = np.stack([
                np.concatenate(
                    [_A,
                     np.zeros((self.max_T - len(_A) - 1, *_A.shape[1:]))])
                for _A in _As
            ])
            _bs = np.stack([
                np.concatenate([
                    _b,
                    np.zeros((self.max_T - len(_b) - 1, self.latent_dim))
                ]) for _b in _bs
            ])
            self.As = Param(_As)
            self.bs = Param(_bs)
            if Ss is not False:
                _S_chols = [
                    np.concatenate([
                        _S,
                        np.zeros((self.max_T - len(_S) - 1, *_S.shape[1:]))
                    ]) for _S in _S_chols
                ]
                _S_chols = np.stack(_S_chols)
                self.S_chols = Param(_S_chols, transform=gtf.positive if _S_chols.ndim == 3 else \
                    gtf.LowerTriangular(self.latent_dim, num_matrices=(self.n_seq, self.max_T - 1)))

        self.multi_diag_px1_cov = False
        if isinstance(px1_cov, list):  # different prior for each sequence
            _x1_cov = np.stack(px1_cov)
            _x1_cov = np.sqrt(
                _x1_cov) if _x1_cov.ndim == 2 else np.linalg.cholesky(_x1_cov)
            _transform = None if _x1_cov.ndim == 2 else gtf.LowerTriangular(
                self.latent_dim, num_matrices=self.n_seq)
            self.multi_diag_px1_cov = _x1_cov.ndim == 2
        elif isinstance(px1_cov, np.ndarray):  # same prior for each sequence
            assert px1_cov.ndim < 3
            _x1_cov = np.sqrt(
                px1_cov) if px1_cov.ndim == 1 else np.linalg.cholesky(px1_cov)
            _transform = None if px1_cov.ndim == 1 else gtf.LowerTriangular(
                self.latent_dim, squeeze=True)

        self.px1_cov_chol = None if px1_cov is None else Param(
            _x1_cov, trainable=False, transform=_transform)

        if self.chunking:
            px1_mu_check = len(self.px1_mu.shape) == 1
            px1_cov_check_1 = not self.multi_diag_px1_cov
            px1_cov_check_2 = self.px1_cov_chol is None or len(
                self.px1_cov_chol.shape) < 3
            assert px1_mu_check and px1_cov_check_1 and px1_cov_check_2, \
                'Only one prior over x1 allowed for chunking'
Example #33
df = pd.DataFrame(reader)
print(df)

dfa = df
type(dfa)

count = len(df.columns)

mean_array = dfa.mean(axis=0)
type(mean_array)
mean_cols = np.asarray(mean_array)
type(mean_cols)

columnMeanAll = np.tile(mean_cols, (dfa.shape[0], 1))
type(columnMeanAll)
len(columnMeanAll)
xMeanCentered = dfa - columnMeanAll

pd.DataFrame(xMeanCentered)
type(xMeanCentered)

df = xMeanCentered

rows, cols = df.shape
print("Rows=", rows, " Col=", cols)

type(df)

samples = np.array(df.loc[:, :])
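The tiled mean-matrix above is equivalent to NumPy broadcasting; a quick self-contained check with illustrative data:

import numpy as np
import pandas as pd

dfa = pd.DataFrame(np.arange(12.0).reshape(4, 3))
mean_cols = np.asarray(dfa.mean(axis=0))
centered_tile = dfa - np.tile(mean_cols, (dfa.shape[0], 1))  # explicit replication
centered_bcast = dfa - mean_cols                             # implicit broadcasting
print(np.allclose(centered_tile, centered_bcast))  # True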
Example #34
def oneDdisorderpotential(m,n,lc,pos):
    """
    ============================================================================
    Create one-dimensional disorder potential on graphene lattice
    ============================================================================
    
    This function creates an one-dimensional spatially-correlated Gaussian 
    disorder potential for graphene. The method used is the same as that in the 
    paper: Choi, SangKook, Cheol-Hwan Park, and Steven G. Louie. 
    "Electron supercollimation in graphene and Dirac Fermion materials 
    using one-dimensional disorder potentials." 
    Physical review letters 113.2 (2014): 026802. 
    
    
    To be clearer, the one-dimensional spatially-correlated Gaussian disorder 
    potential can be in the form of a random vector having the two-point 
    spatial correlation property. Hence, firstly, a random vector consisting of 
    spatially-uncorrelated Gaussian-random variables is composed. Next, using the 
    positions of atoms taken as input parameters, the two-point spatial correlation
    matrix is created and Cholesky decomposition method is used to obtain the 
    matrix with desired spatial correlation property. Finally, the final vector 
    is the dot product of the random vector and matrix with the required spatial 
    correlation property.
    
    
    Inputs
    ----------
    m : integer
        Number of atoms along the x-direction

    n : integer
        Number of atoms along the y-direction

    lc : float
        correlation length
    
    pos: float, tuple
        A tuple containing position information of atoms

    Returns
    -------
    Wfinal: float, array
         The final potential at each x position
    """
    
    assert type(n) is int, "Initial number of rows of carbon atoms must be an integer"
    assert type(m) is int, "Initial number of columns of carbon atoms must be an integer"
    assert isinstance(lc, (float, int)), "The correlation length must be numeric"
    assert type(pos) is tuple, "The pos must be a tuple"
    
    #Extracting each unique x position.
    X = pos[0]
    x_1 = X[0:n*m:n,0]
    x_2 = X[1:n*m:n,0]

    #Parameters for the strength of the disorder potential in units of eV.
    Delta = 0.3

    #Generating a sample of Gaussian-distributed random numbers.
    V = np.random.normal(0,1,m)

    #Generate the two-point matrix for two rows along x axis
    X1 = np.tile(x_1,(m,1))
    X2 = X1.T
    X3 = np.tile(x_2,(m,1))
    X4 = X3.T

    #Generate the two-point spatial correlation matrix for two rows
    C1 = Delta**2*np.exp(-abs(X1-X2)/lc)
    C2 = Delta**2*np.exp(-abs(X3-X4)/lc)

    #Cholesky decomposition of the two-point correlation matrix and generate the final random vector
    L1 = np.linalg.cholesky(C1)
    W1 = np.dot(L1,V)
    L2 = np.linalg.cholesky(C2)
    W2 = np.dot(L2,V)

    #Reshaping for further calculations.
    Wf=np.zeros((n,m))
    Wf[0:n:2,0:m]=W1
    Wf[1:n:2,0:m]=W2
    Wfinal=Wf.T.reshape((n*m,1))
    
    return Wfinal
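A condensed sketch of the Cholesky trick above on a toy grid (parameters are illustrative, and a small jitter term is an added assumption for numerical safety): multiplying uncorrelated normals by the Cholesky factor of the target covariance yields a spatially correlated sample.

import numpy as np

m, lc, Delta = 50, 5.0, 0.3
x = np.arange(m, dtype=float)
X1 = np.tile(x, (m, 1))                          # two-point position matrix
C = Delta**2 * np.exp(-np.abs(X1 - X1.T) / lc)   # exponential two-point correlation
L1 = np.linalg.cholesky(C + 1e-12 * np.eye(m))   # jitter added for numerical safety
W = L1.dot(np.random.normal(0, 1, m))            # correlated disorder sample
print(W.shape)  # (50,)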
Example #35
 def rule_firing(self, x):
     # Evaluates membership functions on each input for the whole batch
     F = np.reshape(np.exp(-0.5 * ((np.tile(x, (1, self.m)) - self.mus) ** 2) / (self.sigmas ** 2)),
                    (-1, self.m, self.n))
     # Gets the firing strength of each rule by applying a T-norm (product in this case)
     return np.prod(F, axis=2)
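Standalone sketch of the reshape/tile trick above (sizes and parameters are made up): tiling the batch inputs m times lines them up against all m*n Gaussian membership functions in one vectorized evaluation.

import numpy as np

batch, n, m = 4, 3, 2                # samples, inputs per sample, rules
x = np.random.rand(batch, n)
mus = np.random.rand(1, m * n)       # one center per (rule, input) pair
sigmas = np.ones((1, m * n))
F = np.reshape(np.exp(-0.5 * ((np.tile(x, (1, m)) - mus) ** 2) / (sigmas ** 2)),
               (-1, m, n))
firing = np.prod(F, axis=2)          # product T-norm over the inputs of each rule
print(firing.shape)  # (4, 2)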
Example #36
def visualize_detection_results(result_dict,
                                tag,
                                global_step,
                                categories,
                                summary_dir='',
                                export_dir='',
                                agnostic_mode=False,
                                show_groundtruth=False,
                                groundtruth_box_visualization_color='black',
                                min_score_thresh=.5,
                                max_num_predictions=20,
                                skip_scores=False,
                                skip_labels=False,
                                keep_image_id_for_visualization_export=False):
  """Visualizes detection results and writes visualizations to image summaries.

  This function visualizes an image with its detected bounding boxes and writes
  to image summaries which can be viewed on tensorboard.  It optionally also
  writes images to a directory. In the case of missing entry in the label map,
  unknown class name in the visualization is shown as "N/A".

  Args:
    result_dict: a dictionary holding groundtruth and detection
      data corresponding to each image being evaluated.  The following keys
      are required:
        'original_image': a numpy array representing the image with shape
          [1, height, width, 3] or [1, height, width, 1]
        'detection_boxes': a numpy array of shape [N, 4]
        'detection_scores': a numpy array of shape [N]
        'detection_classes': a numpy array of shape [N]
      The following keys are optional:
        'groundtruth_boxes': a numpy array of shape [N, 4]
        'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
      Detections are assumed to be provided in decreasing order of score, and
      for display we assume that scores are probabilities between 0 and 1.
    tag: tensorboard tag (string) to associate with image.
    global_step: global step at which the visualization are generated.
    categories: a list of dictionaries representing all possible categories.
      Each dict in this list has the following keys:
          'id': (required) an integer id uniquely identifying this category
          'name': (required) string representing category name
            e.g., 'cat', 'dog', 'pizza'
          'supercategory': (optional) string representing the supercategory
            e.g., 'animal', 'vehicle', 'food', etc
    summary_dir: the output directory to which the image summaries are written.
    export_dir: the output directory to which images are written.  If this is
      empty (default), then images are not exported.
    agnostic_mode: boolean (default: False) controlling whether to evaluate in
      class-agnostic mode or not.
    show_groundtruth: boolean (default: False) controlling whether to show
      groundtruth boxes in addition to detected boxes
    groundtruth_box_visualization_color: box color for visualizing groundtruth
      boxes
    min_score_thresh: minimum score threshold for a box to be visualized
    max_num_predictions: maximum number of detections to visualize
    skip_scores: whether to skip score when drawing a single detection
    skip_labels: whether to skip label when drawing a single detection
    keep_image_id_for_visualization_export: whether to keep image identifier in
      filename when exported to export_dir
  Raises:
    ValueError: if result_dict does not contain the expected keys (i.e.,
      'original_image', 'detection_boxes', 'detection_scores',
      'detection_classes')
  """
  detection_fields = fields.DetectionResultFields
  input_fields = fields.InputDataFields
  if not set([
      input_fields.original_image,
      detection_fields.detection_boxes,
      detection_fields.detection_scores,
      detection_fields.detection_classes,
  ]).issubset(set(result_dict.keys())):
    raise ValueError('result_dict does not contain all expected keys.')
  if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
    raise ValueError('If show_groundtruth is enabled, result_dict must contain '
                     'groundtruth_boxes.')
  tf.logging.info('Creating detection visualizations.')
  category_index = label_map_util.create_category_index(categories)

  image = np.squeeze(result_dict[input_fields.original_image], axis=0)
  if image.shape[2] == 1:  # If one channel image, repeat in RGB.
    image = np.tile(image, [1, 1, 3])
  detection_boxes = result_dict[detection_fields.detection_boxes]
  detection_scores = result_dict[detection_fields.detection_scores]
  detection_classes = np.int32((result_dict[
      detection_fields.detection_classes]))
  detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
  detection_masks = result_dict.get(detection_fields.detection_masks)
  detection_boundaries = result_dict.get(detection_fields.detection_boundaries)

  # Plot groundtruth underneath detections
  if show_groundtruth:
    groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
    groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
    vis_utils.visualize_boxes_and_labels_on_image_array(
        image=image,
        boxes=groundtruth_boxes,
        classes=None,
        scores=None,
        category_index=category_index,
        keypoints=groundtruth_keypoints,
        use_normalized_coordinates=False,
        max_boxes_to_draw=None,
        groundtruth_box_visualization_color=groundtruth_box_visualization_color)
  vis_utils.visualize_boxes_and_labels_on_image_array(
      image,
      detection_boxes,
      detection_classes,
      detection_scores,
      category_index,
      instance_masks=detection_masks,
      instance_boundaries=detection_boundaries,
      keypoints=detection_keypoints,
      use_normalized_coordinates=False,
      max_boxes_to_draw=max_num_predictions,
      min_score_thresh=min_score_thresh,
      agnostic_mode=agnostic_mode,
      skip_scores=skip_scores,
      skip_labels=skip_labels)

  if export_dir:
    if keep_image_id_for_visualization_export and result_dict[fields.
                                                              InputDataFields()
                                                              .key]:
      export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
          tag, result_dict[fields.InputDataFields().key]))
    else:
      export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
    vis_utils.save_image_array_as_png(image, export_path)

  summary = tf.Summary(value=[
      tf.Summary.Value(
          tag=tag,
          image=tf.Summary.Image(
              encoded_image_string=vis_utils.encode_image_array_as_png_str(
                  image)))
  ])
  summary_writer = tf.summary.FileWriterCache.get(summary_dir)
  summary_writer.add_summary(summary, global_step)

  tf.logging.info('Detection visualizations written to summary with tag %s.',
                  tag)
Example #37
linestyles = ['-', ':']
colors=['k', 'r']
nVisits = 2


nSystems = 10
within_matrix = np.zeros((nSub*2*nSystems,1))
for sub in np.arange(nSub):
	for sys in np.arange(nSystems):
		within_matrix[sys+(nSystems*2*sub),0] = average_within_mat[sys,sys,sub,0]
		print(sys+(nSystems*2*sub))
		within_matrix[sys+(nSystems*2*sub)+nSystems,0] = average_within_mat[sys,sys,sub,1]
		print(sys+(nSystems*2*sub)+nSystems)
ses_vec = np.ones((nSystems*2,1))
ses_vec[nSystems:,0] = 3
ses_vec = np.tile(ses_vec,(nSub,1))
group_vec = np.zeros((nSub,1))
group_vec[MDD_ind,0] = 1
group_vec = np.reshape(np.repeat(group_vec,(nSystems*2)),(nSub*2*nSystems,1))
subject_vec = np.reshape(np.repeat(subjects,nSystems*2),(nSub*2*nSystems,1))
network_vec = np.reshape(np.tile(np.arange(nSystems),2*nSub),(nSub*2*nSystems,1))
# make a dataframe -- starting from the array
# columns: subject number, group, session, within-network value (CON), network index
all_data = np.concatenate((subject_vec, group_vec, ses_vec, within_matrix, network_vec), axis=1)
all_cols = ['subject', 'group', 'session'] + systems_to_keep_abbrv  # per-system column names (unused below)
data = pd.DataFrame(data=all_data, columns=['subject', 'group', 'session', 'CON', 'network'])

fig = plt.figure()
g = sns.catplot(data=data,x='network',y='CON', hue='group', kind='bar',col='session',ci=68,palette=['k', 'r'],alpha=0.5)
plt.show()
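# For reference, the same long-format table can be built without manual index
# arithmetic by labelling the axes and letting pandas reshape. A minimal
# sketch, assuming average_within_mat has shape (nSystems, nSystems, nSub, 2)
# as above (the shapes below are hypothetical):
import numpy as np
import pandas as pd

nSub, nSystems = 20, 10
average_within_mat = np.random.rand(nSystems, nSystems, nSub, 2)

# Within-network values are the diagonal entries -> shape (nSystems, nSub, 2)
diag = average_within_mat[np.arange(nSystems), np.arange(nSystems), :, :]

idx = pd.MultiIndex.from_product(
    [np.arange(nSystems), np.arange(nSub), [1, 3]],
    names=['network', 'subject', 'session'])
data_alt = pd.DataFrame({'CON': diag.ravel()}, index=idx).reset_index()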
Example #38
def main():
    import argparse
    parser = argparse.ArgumentParser(description="Guided mode example")
    parser.add_argument('-ids','--quad_ids', nargs='+', help='<Required> IDs of all quads used', required=True)
    parser.add_argument("-f", "--filename", dest='log_filename', default='log_runway_000', type=str, help="Log file name")
    parser.add_argument("-L", "--new_length", dest='new_length', default=Settings.RUNWAY_LENGTH, type=float, help="Override the 124 m runway length")
    parser.add_argument("-W", "--new_width", dest='new_width', default=Settings.RUNWAY_WIDTH, type=float, help="Override the 12.5 m runway width")
    parser.add_argument("-alt", "--altitude", dest='altitude', default=2, type=float, help="Override the 2 m first quad altitude")
    parser.add_argument("-no_avg", "--dont_average_output", dest="dont_average_output", action="store_true")
    parser.add_argument("-fail", "--failure_times", nargs='+', dest="failure_times",default=[9999.9])
    args = parser.parse_args()

    failure_times = list(map(float, args.failure_times))
  
    
    if Settings.INDOORS:
        runway_angle_wrt_north = 0
    else:
        runway_angle_wrt_north = (156.485-90)*np.pi/180 #[rad]
    C_bI = make_C_bI(runway_angle_wrt_north) # [3x3] rotation matrix from North (I) to body (tilted runway)
    C_bI_22 = make_C_bI_22(runway_angle_wrt_north) # [2x2] rotation matrix from North (I) to body (tilted runway)
    
    if args.new_length != Settings.RUNWAY_LENGTH:
        print("\nOverwriting %.1f m runway length with user-defined %.1f m runway length." %(Settings.RUNWAY_LENGTH,args.new_length))
    runway_length = args.new_length
    if args.new_width != Settings.RUNWAY_WIDTH:
        print("\nOverwriting %.1f m runway width with user-defined %.1f m runway width." %(Settings.RUNWAY_WIDTH,args.new_width))
    runway_width = args.new_width
    
    first_quad_altitude = args.altitude
    
    # Communication delay length in timesteps
    COMMUNICATION_DELAY_LENGTH = 0 # timesteps
    if COMMUNICATION_DELAY_LENGTH > 0:
        print("\nA simulated communication delay of %.1f seconds is used" %(COMMUNICATION_DELAY_LENGTH*Settings.TIMESTEP))

    interface = None
    not_done = True
    
    failure_acknowledged = np.tile(False,len(failure_times))
    
    average_deep_guidance_NED = np.zeros([Settings.NUMBER_OF_QUADS, Settings.ACTION_SIZE])
    
    # converting this input from a list of strings to a list of ints
    all_ids = list(map(int, args.quad_ids))
    
    log_filename = args.log_filename
    max_duration = 100000
    log_placeholder = np.zeros((max_duration, 3*Settings.NUMBER_OF_QUADS + 1 + Settings.TOTAL_STATE_SIZE + 6))
    log_counter = 0 # for log increment
    
    # Flag to not average the guidance output
    dont_average_output = args.dont_average_output
    if dont_average_output:
        print("\n\nDeep guidance output is NOT averaged\n\n")
    else:
        print("\n\nDeep guidance output is averaged\n\n")
    
    timestep = Settings.TIMESTEP
    
    
    # Generate Polygons for runway tiles
    # The size of each runway grid element
    each_runway_length_element = Settings.RUNWAY_LENGTH/Settings.RUNWAY_LENGTH_ELEMENTS
    each_runway_width_element  = Settings.RUNWAY_WIDTH/Settings.RUNWAY_WIDTH_ELEMENTS
    tile_polygons = []
    for i in range(Settings.RUNWAY_LENGTH_ELEMENTS):
        this_row = []
        for j in range(Settings.RUNWAY_WIDTH_ELEMENTS):
            # make the polygon
            this_row.append(Polygon([[each_runway_length_element*i     - Settings.RUNWAY_LENGTH/2, each_runway_width_element*j     - Settings.RUNWAY_WIDTH/2],
                                     [each_runway_length_element*(i+1) - Settings.RUNWAY_LENGTH/2, each_runway_width_element*j     - Settings.RUNWAY_WIDTH/2],
                                     [each_runway_length_element*(i+1) - Settings.RUNWAY_LENGTH/2, each_runway_width_element*(j+1) - Settings.RUNWAY_WIDTH/2],
                                     [each_runway_length_element*i     - Settings.RUNWAY_LENGTH/2, each_runway_width_element*(j+1) - Settings.RUNWAY_WIDTH/2]]))
            
        tile_polygons.append(this_row)
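    # For intuition, the coverage test used in the main loop below reduces to
    # shapely's LineString.intersects(Polygon). A tiny standalone check with
    # hypothetical coordinates:
    #   tile = Polygon([[0, 0], [1, 0], [1, 1], [0, 1]])
    #   path = LineString([(-0.5, 0.5), (0.5, 0.5)])
    #   path.intersects(tile)  # -> True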
    
    
    ### Deep guidance initialization stuff
    tf.reset_default_graph()

    # Initialize Tensorflow, and load in policy
    with tf.Session() as sess:
        # Building the policy network
        state_placeholder = tf.placeholder(dtype = tf.float32, shape = [None, Settings.OBSERVATION_SIZE], name = "state_placeholder")
        actor = BuildActorNetwork(state_placeholder, scope='learner_actor_main')
    
        # Loading in trained network weights
        print("Attempting to load in previously-trained model\n")
        saver = tf.train.Saver() # initialize the tensorflow Saver()
    
        # Try to load in policy network parameters
        try:
            ckpt = tf.train.get_checkpoint_state('../')
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("\nModel successfully loaded!\n")
    
        except (ValueError, AttributeError):
            print("No model found... quitting :(")
            raise SystemExit
    
        #######################################################################
        ### Guidance model is loaded, now get data and run it through model ###
        #######################################################################


        try:
            start_time = time.time()
            g = Guidance(interface=interface, quad_ids = all_ids)
            sleep(0.1)
            # g.set_guided_mode()
            sleep(0.2)

            total_time = 0.0
            
            last_deep_guidance = np.zeros(Settings.ACTION_SIZE)
            
            if Settings.AUGMENT_STATE_WITH_ACTION_LENGTH > 0:                    
                # Create state-augmentation queue (holds previous actions)
                past_actions = queue.Queue(maxsize = Settings.AUGMENT_STATE_WITH_ACTION_LENGTH)
        
                # Fill it with zeros to start
                for j in range(Settings.AUGMENT_STATE_WITH_ACTION_LENGTH):
                    past_actions.put(np.zeros([Settings.NUMBER_OF_QUADS, Settings.ACTION_SIZE]), False)
            
            # Initializing
            runway_state = np.zeros([Settings.RUNWAY_LENGTH_ELEMENTS, Settings.RUNWAY_WIDTH_ELEMENTS])
            last_runway_state = np.zeros([Settings.RUNWAY_LENGTH_ELEMENTS, Settings.RUNWAY_WIDTH_ELEMENTS])            
            desired_altitudes = np.linspace(first_quad_altitude+Settings.NUMBER_OF_QUADS*2, first_quad_altitude, Settings.NUMBER_OF_QUADS, endpoint = False)
                
            while not_done:
                # TODO: make better frequency managing
                sleep(timestep)
                
                # Initializing quadrotor positions and velocities
                quad_positions = np.zeros([Settings.NUMBER_OF_QUADS, 3]) 
                quad_velocities = np.zeros([Settings.NUMBER_OF_QUADS, 3])
                
                quad_number_not_id = 0
                for rc in g.rotorcrafts:
                    
                    rc.timeout = rc.timeout + timestep                    
                    
                    """ policy_input is: [chaser_x, chaser_y, chaser_z, target_x, target_y, target_z, target_theta, 
                                          chaser_x_dot, chaser_y_dot, chaser_z_dot, (optional past action data)] 
                    """

                    # Extracting position
                    quad_positions[ quad_number_not_id, 0] =  rc.X[0]
                    quad_positions[ quad_number_not_id, 1] = -rc.X[1]
                    quad_positions[ quad_number_not_id, 2] =  rc.X[2]
                    
                    # Rotating from Inertial frame (NED frame) into body frame (runway frame)
                    quad_positions[ quad_number_not_id, :] = np.matmul(C_bI, quad_positions[ quad_number_not_id, :])
                    
                    # Scale position after rotating
                    quad_positions[ quad_number_not_id, 0] = quad_positions[ quad_number_not_id, 0]*Settings.RUNWAY_LENGTH/runway_length # scaling to the new runway length
                    quad_positions[ quad_number_not_id, 1] = quad_positions[ quad_number_not_id, 1]*Settings.RUNWAY_WIDTH/runway_width # scaling to the new runway width
                    
                    # Extracting velocity
                    quad_velocities[quad_number_not_id, 0] =  rc.V[0]#*Settings.RUNWAY_LENGTH/runway_length
                    quad_velocities[quad_number_not_id, 1] = -rc.V[1]#*Settings.RUNWAY_WIDTH/runway_width
                    quad_velocities[quad_number_not_id, 2] =  rc.V[2]
                    
                    # Rotating from Inertial frame (NED frame) into body frame (runway frame)
                    quad_velocities[ quad_number_not_id, :] = np.matmul(C_bI, quad_velocities[ quad_number_not_id, :])
                    
                    # Scale velocities after rotating
                    quad_velocities[quad_number_not_id, 0] = quad_velocities[quad_number_not_id, 0]*Settings.RUNWAY_LENGTH/runway_length
                    quad_velocities[quad_number_not_id, 1] = quad_velocities[quad_number_not_id, 1]*Settings.RUNWAY_WIDTH/runway_width

                    quad_number_not_id += 1
                
                
                
                # Resetting the action delay queue 
                if total_time == 0.0:                        
                    if COMMUNICATION_DELAY_LENGTH > 0:
                        communication_delay_queue = queue.Queue(maxsize = COMMUNICATION_DELAY_LENGTH + 1)
                        # Fill it with zeros initially
                        for i in range(COMMUNICATION_DELAY_LENGTH):
                            communication_delay_queue.put([quad_positions, quad_velocities], False)
                    
                    # Resetting the initial previous position to be the first position
                    previous_quad_positions = quad_positions
                
                
                # Checking if a quadrotor has failed
                for i in range(len(failure_times)):
                    if (total_time > failure_times[i]) and (not failure_acknowledged[i]):
                        # exact float equality almost never fires; use a one-timestep tolerance
                        if np.abs(total_time - failure_times[i]) < timestep:
                            print("\n\nQuad %i has failed!\n\n"%i)
                        # Force the position and velocity to their 'failed' states
                        quad_positions[i,:]  = Settings.LOWER_STATE_BOUND[:3]
                        previous_quad_positions[i,:] = quad_positions[i,:]
                        quad_velocities[i,:] = np.zeros([3])
                                    
                if COMMUNICATION_DELAY_LENGTH > 0:
                    communication_delay_queue.put([quad_positions, quad_velocities], False) # puts the current position and velocity to the bottom of the stack
                    delayed_quad_positions, delayed_quad_velocities = communication_delay_queue.get(False) # grabs the delayed position and velocity.   
                                
                ##############################################################
                ### Check the runway for new tiles that have been explored ###
                ##############################################################
                # Generate quadrotor LineStrings
                for i in range(Settings.NUMBER_OF_QUADS):
                    quad_line = LineString([quad_positions[i,:-1], previous_quad_positions[i,:-1]])
                    
                    for j in range(Settings.RUNWAY_LENGTH_ELEMENTS):
                        for k in range(Settings.RUNWAY_WIDTH_ELEMENTS):                    
                            # If this element has already been explored, skip it
                            if runway_state[j,k] == 0 and quad_line.intersects(tile_polygons[j][k]):
                                runway_state[j,k] = 1
                                #print("Quad %i traced the line %s and explored runway element length = %i, width = %i with coordinates %s" %(i,list(quad_line.coords),j,k,tile_polygons[j][k].bounds))
                    
                # Storing current quad positions for the next timestep
                previous_quad_positions = quad_positions
                
                # Print if a new tile has been explored
                if np.any(last_runway_state != runway_state):
                    print("Runway elements discovered %i/%i" %(np.sum(runway_state), Settings.RUNWAY_LENGTH_ELEMENTS*Settings.RUNWAY_WIDTH_ELEMENTS))
                    
                    # Draw a new runway
                    print(np.flip(runway_state))
                
                if np.all(runway_state == 1):
                    print("Explored the entire runway in %.2f seconds--Congratulations! Quitting deep guidance" %(time.time()-start_time))
                    not_done = False
                
                total_states = []
                # Building NUMBER_OF_QUADS states
                for j in range(Settings.NUMBER_OF_QUADS):
                    # Start state with your own 
                    this_quads_state = np.concatenate([quad_positions[j,:], quad_velocities[j,:]])               
                    # Add in the others' states, starting with the next quad and finishing with the previous quad
                    for k in range(j + 1, Settings.NUMBER_OF_QUADS + j):
                        if COMMUNICATION_DELAY_LENGTH > 0:
                            this_quads_state = np.concatenate([this_quads_state, delayed_quad_positions[k % Settings.NUMBER_OF_QUADS,:], delayed_quad_velocities[k % Settings.NUMBER_OF_QUADS,:]])
                        else:
                            this_quads_state = np.concatenate([this_quads_state, quad_positions[k % Settings.NUMBER_OF_QUADS,:], quad_velocities[k % Settings.NUMBER_OF_QUADS,:]])
                    
                    # All quad data is included; the runway state is appended after the optional action augmentation below
                    total_states.append(this_quads_state)

                # Augment total_state with past actions, if appropriate
                if Settings.AUGMENT_STATE_WITH_ACTION_LENGTH > 0:
                    # total_states = [Settings.NUMBER_OF_QUADS, Settings.TOTAL_STATE_SIZE]
                    # Just received a total_state from the environment, need to augment 
                    # it with the past action data and return it
                    # The past_action_data is of shape [Settings.AUGMENT_STATE_WITH_ACTION_LENGTH, Settings.NUMBER_OF_QUADS, Settings.ACTION_SIZE]
                    # I swap the first and second axes so that I can reshape it properly
            
                    past_action_data = np.swapaxes(np.asarray(past_actions.queue),0,1).reshape([Settings.NUMBER_OF_QUADS, -1]) # past actions reshaped into rows for each quad    
                    total_states = np.concatenate([np.asarray(total_states), past_action_data], axis = 1)
            
                    # Remove the oldest entry from the action log queue
                    past_actions.get(False)
                
                # Concatenating the runway to the augmented state
                total_states = np.concatenate([total_states, np.tile(runway_state.reshape(-1),(Settings.NUMBER_OF_QUADS,1))], axis = 1)
                
                total_state_to_log = total_states[0,:]

                # Normalize the state
                if Settings.NORMALIZE_STATE:
                    total_states = (total_states - Settings.STATE_MEAN)/Settings.STATE_HALF_RANGE

                # Discarding irrelevant states
                observations = np.delete(total_states, Settings.IRRELEVANT_STATES, axis = 1)

                # Run processed state through the policy
                deep_guidance = sess.run(actor.action_scaled, feed_dict={state_placeholder:observations}) # deep guidance = [ chaser_x_acceleration [runway north], chaser_y_acceleration [runway west], chaser_z_acceleration [up] ]
                
                # Adding the action taken to the past_action log
                if Settings.AUGMENT_STATE_WITH_ACTION_LENGTH > 0:
                    past_actions.put(deep_guidance)

                # Limit guidance commands if velocity is too high!
                # Checking whether our velocity is too large AND the acceleration is trying to increase said velocity... in which case we set the desired_linear_acceleration to zero.
                for j in range(Settings.NUMBER_OF_QUADS):
                    #print(quad_velocities[j,0:2], (np.abs(quad_velocities[j,0:2]) > Settings.VELOCITY_LIMIT) & (np.sign(deep_guidance[j,:]) == np.sign(quad_velocities[j,0:2])), "Unsaturated: ", deep_guidance, end=' ')
                    # Components whose speed exceeds the limit while the command would increase it further
                    overspeed = (np.abs(quad_velocities[j,0:2]) > Settings.VELOCITY_LIMIT) & \
                                (np.sign(deep_guidance[j,:]) == np.sign(quad_velocities[j,0:2]))
                    # Replace those commands with a braking acceleration
                    deep_guidance[j, overspeed] = -2*np.sign(deep_guidance[j, overspeed])
                    #print("Saturated: ", deep_guidance, end =' ')
                    
        
                average_deep_guidance = (last_deep_guidance + deep_guidance)/2.0
                last_deep_guidance = deep_guidance
                last_runway_state = np.copy(runway_state)
                
                # Get each quad to accelerate appropriately
                for j in range(Settings.NUMBER_OF_QUADS):
                    
                    # Checking if a quadrotor has failed
                    for i in range(len(failure_times)):                            
                        if (total_time > failure_times[i]) and (i==j):
                            if np.abs(total_time - failure_times[i]) < 0.5:
                                print("\n\nSimulated failure of quad %i\n\n"%(all_ids[i]))
                            skip_this_one = True
                            break
                        else:
                            skip_this_one = False
                    if skip_this_one:
                        continue
                    
                    # Each quad is assigned a different altitude to remain at.
                    desired_altitude = desired_altitudes[j]

                    # Rotate desired deep guidance command from body frame (runway frame) frame to inertial frame (NED frame)
                    #print("Unrotated average: ", average_deep_guidance)
                    average_deep_guidance_NED[j,:] = np.matmul(C_bI_22.T, average_deep_guidance[j,:])
                    #print("Rotated average: ", average_deep_guidance)
                    
                    
                    if dont_average_output:
                        g.accelerate(north = deep_guidance[j,0], east = -deep_guidance[j,1], down = desired_altitude, quad_id = g.ids[j])
                        print("The no_avg setting is broken")
                        raise SystemExit
                    else:
                        g.accelerate(north = average_deep_guidance_NED[j,0], east = -average_deep_guidance_NED[j,1], down = desired_altitude, quad_id = g.ids[j]) # Averaged        
                
                total_time = total_time + timestep
                # Log all input and outputs:
                t = time.time()-start_time
                log_placeholder[log_counter,0] = t
                log_placeholder[log_counter,1:3*Settings.NUMBER_OF_QUADS + 1] = np.concatenate([average_deep_guidance.reshape(-1), desired_altitudes.reshape(-1)])
                # log_placeholder[i,5:8] = deep_guidance_xf, deep_guidance_yf, deep_guidance_zf
                log_placeholder[log_counter,3*Settings.NUMBER_OF_QUADS + 1:3*Settings.NUMBER_OF_QUADS + 1 + Settings.TOTAL_STATE_SIZE + 6] = total_state_to_log
                log_counter += 1
    
            # If we ended gracefully
            exit()
        
        # If we ended forcefully
        except (KeyboardInterrupt, SystemExit): 
            print('Shutting down...')
            g.shutdown()
            sleep(0.2)
            print("Saving file as %s..." %(log_filename+"_L"+str(runway_length)+"_W"+str(runway_width)+".txt"))
            with open(log_filename+"_L"+str(runway_length)+"_W"+str(runway_width)+".txt", 'wb') as f:
                np.save(f, log_placeholder[:log_counter])
            print("Done!")
            exit()
Example #39
omega_vi = bdk_cal.omega.to_list()
code_vi = list(bdk_cal.index)
nivel_vi = bdk_cal.Nivel.to_list()
pet_space = (np.linspace(0, 50, 100) / 100).round(3)
p_space = (np.linspace(-50, 50, 100) / 100).round(2)

omega_series = pd.Series(np.repeat(omega_vi,
                                   len(pet_space) * len(p_space)),
                         name="omega")
nivel_series = pd.Series(np.repeat(nivel_vi,
                                   len(pet_space) * len(p_space)),
                         name="nivel")
code_series = pd.Series(np.repeat(code_vi,
                                  len(pet_space) * len(p_space)),
                        name="code")
pet_series = pd.Series(np.tile(np.tile(pet_space, len(p_space)),
                               len(omega_vi)),
                       name="PE")
p_series = pd.Series(np.tile(np.repeat(p_space, len(pet_space)), len(omega_vi)),
                     name="P")
data_all = pd.concat(
    [nivel_series, code_series, omega_series, pet_series, p_series], axis=1)

df_vi = data_all

p_hist = bdk_cal.P.median()
ae_hist = bdk_cal.AE.median()
pet_hist = bdk_cal.PE.median()
wa_hist = p_hist - ae_hist

df_vi["PET_sp"] = (pet_hist + pet_hist * df_vi.PE)
df_vi["P_sp"] = (p_hist + p_hist * df_vi.P)
Example #40
def zero_mean(xframes):
	# Subtract each frame's mean so that every row of xframes has zero mean
	m = np.mean(xframes,axis=1)
	xframes = xframes - np.tile(m,(xframes.shape[1],1)).T
	return xframes
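# The np.tile above only restores the axis lost by the mean reduction; the
# same result follows from broadcasting with keepdims. A minimal equivalent
# sketch:
def zero_mean_broadcast(xframes):
	# keepdims=True keeps the reduced axis, so the subtraction broadcasts
	return xframes - np.mean(xframes, axis=1, keepdims=True)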
Example #41
    def _generate_spherical_poses(self, poses, bds):
        """Generate a 360 degree spherical path for rendering."""
        # pylint: disable=g-long-lambda
        p34_to_44 = lambda p: np.concatenate(
            [p, np.tile(np.reshape(np.eye(4)[-1, :], [1, 1, 4]), [p.shape[0], 1, 1])], 1
        )
        rays_d = poses[:, :3, 2:3]
        rays_o = poses[:, :3, 3:4]

        def min_line_dist(rays_o, rays_d):
            a_i = np.eye(3) - rays_d * np.transpose(rays_d, [0, 2, 1])
            b_i = -a_i @ rays_o
            pt_mindist = np.squeeze(
                -np.linalg.inv((np.transpose(a_i, [0, 2, 1]) @ a_i).mean(0))
                @ (b_i).mean(0)
            )
            return pt_mindist

        pt_mindist = min_line_dist(rays_o, rays_d)
        center = pt_mindist
        up = (poses[:, :3, 3] - center).mean(0)
        vec0 = self._normalize(up)
        vec1 = self._normalize(np.cross([0.1, 0.2, 0.3], vec0))
        vec2 = self._normalize(np.cross(vec0, vec1))
        pos = center
        c2w = np.stack([vec1, vec2, vec0, pos], 1)
        poses_reset = np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:, :3, :4])
        rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:, :3, 3]), -1)))
        sc = 1.0 / rad
        poses_reset[:, :3, 3] *= sc
        bds *= sc
        rad *= sc
        centroid = np.mean(poses_reset[:, :3, 3], 0)
        zh = centroid[2]
        radcircle = np.sqrt(rad ** 2 - zh ** 2)
        new_poses = []

        for th in np.linspace(0.0, 2.0 * np.pi, 120):
            camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh])
            up = np.array([0, 0, -1.0])
            vec2 = self._normalize(camorigin)
            vec0 = self._normalize(np.cross(vec2, up))
            vec1 = self._normalize(np.cross(vec2, vec0))
            pos = camorigin
            p = np.stack([vec0, vec1, vec2, pos], 1)
            new_poses.append(p)

        new_poses = np.stack(new_poses, 0)
        new_poses = np.concatenate(
            [
                new_poses,
                np.broadcast_to(poses[0, :3, -1:], new_poses[:, :3, -1:].shape),
            ],
            -1,
        )
        poses_reset = np.concatenate(
            [
                poses_reset[:, :3, :4],
                np.broadcast_to(poses[0, :3, -1:], poses_reset[:, :3, -1:].shape),
            ],
            -1,
        )
        if self.split == "test":
            self.render_poses = new_poses[:, :3, :4]
        return poses_reset
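    # For intuition, the p34_to_44 helper above appends the homogeneous row
    # [0, 0, 0, 1] to a batch of 3x4 pose matrices. A standalone check:
    #   p = np.zeros((2, 3, 4))   # two 3x4 camera-to-world matrices
    #   p34_to_44(p).shape        # -> (2, 4, 4)
    #   p34_to_44(p)[0, -1]       # -> array([0., 0., 0., 1.])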
Example #42
def make_local_connectivity_scorr(func_img, clust_mask_img, thresh):
    """
    Constructs a spatially constrained connectivity matrix from a fMRI dataset.
    The weights w_ij of the connectivity matrix W correspond to the
    spatial correlation between the whole brain FC maps generated from the
    time series from voxel i and voxel j. Connectivity is only calculated
    between a voxel and the 27 voxels in its 3D neighborhood
    (face-, edge-, and corner-touching, including the voxel itself).

    Parameters
    ----------
    func_img : Nifti1Image
        4D Nifti1Image containing fMRI data.
    clust_mask_img : Nifti1Image
        3D NIFTI file containing a mask, which restricts the voxels used in the analysis.
    thresh : float
        Threshold value, correlation coefficients lower than this value
        will be removed from the matrix (set to zero).

    Returns
    -------
    W : Compressed Sparse Matrix
        A Scipy sparse matrix, with weights corresponding to the spatial correlation between the time series from
        voxel i and voxel j

    References
    ----------
    .. Adapted from PyClusterROI
    """
    from scipy.sparse import csc_matrix
    from itertools import product
    from pynets.fmri.clustools import indx_1dto3d, indx_3dto1d

    neighbors = np.array(sorted(sorted(sorted([list(x) for x in list(set(product({-1, 0, 1}, repeat=3)))],
                                              key=lambda k: (k[0])), key=lambda k: (k[1])), key=lambda k: (k[2])))

    # Read in the mask
    msz = clust_mask_img.shape

    # Convert the 3D mask array into a 1D vector
    mskdat = np.reshape(np.asarray(clust_mask_img.dataobj).astype('bool'), np.prod(msz))

    # Determine the 1D coordinates of the non-zero
    # elements of the mask
    iv = np.nonzero(mskdat)[0]
    sz = func_img.shape

    # Reshape fmri data to a num_voxels x num_timepoints array
    imdat = np.reshape(np.asarray(func_img.dataobj).astype('float32'), (np.prod(sz[:3]), sz[3]))

    # Mask the dataset to only the in-mask voxels
    imdat = imdat[iv, :]
    imdat_sz = imdat.shape

    # Z-score fMRI time courses; this makes calculation of the
    # correlation coefficient a simple matrix product
    imdat_s = np.tile(np.std(imdat, 1), (imdat_sz[1], 1)).T

    # Replace 0 with really large number to avoid div by zero
    imdat_s[imdat_s == 0] = 1000000
    imdat_m = np.tile(np.mean(imdat, 1), (imdat_sz[1], 1)).T
    imdat = (imdat - imdat_m) / imdat_s

    # Set values with no variance to zero
    imdat[imdat_s == 0] = 0
    imdat[np.isnan(imdat)] = 0

    # Remove voxels with zero variance, do this here
    # so that the mapping will be consistent across
    # subjects
    vndx = np.nonzero(np.var(imdat, 1) != 0)[0]
    iv = iv[vndx]
    m = len(iv)
    print(m, ' # of non-zero valued or non-zero variance voxels in the mask')

    # Construct a sparse matrix from the mask
    msk = csc_matrix((vndx + 1, (iv, np.zeros(m))), shape=(np.prod(msz), 1), dtype=np.float32)

    sparse_i = []
    sparse_j = []
    sparse_w = [[]]

    for i in range(0, m):
        if i % 1000 == 0:
            print('voxel #', i)

        # Convert index into 3D and calculate neighbors, then convert resulting 3D indices into 1D
        ndx1d = indx_3dto1d(indx_1dto3d(iv[i], sz[:-1]) + neighbors, sz[:-1])

        # Convert 1D indices into masked versions
        ondx1d = msk[ndx1d].todense()

        # Exclude indices not in the mask
        ndx1d = ndx1d[np.nonzero(ondx1d)[0]].flatten()
        ondx1d = np.array(ondx1d[np.nonzero(ondx1d)[0]])
        ondx1d = ondx1d.flatten() - 1

        # Keep track of the index corresponding to the "seed"
        nndx = np.nonzero(ndx1d == iv[i])[0]

        # Extract the time courses corresponding to the "seed"
        # and 3D neighborhood voxels
        tc = np.array(imdat[ondx1d.astype('int'), :])

        # Ensure that the "seed" has variance, if not just skip it
        if np.var(tc[nndx, :]) == 0:
            continue

        # Calculate functional connectivity maps for "seed"
        # and 3D neighborhood voxels
        R = np.corrcoef(np.dot(tc, imdat.T) / (sz[3] - 1))

        if np.linalg.matrix_rank(R) == 1:
            R = np.reshape(R, (1, 1))

        # Set nans to 0
        R[np.isnan(R)] = 0

        # Set values below thresh to 0
        R[R < thresh] = 0

        # Calculate the spatial correlation between FC maps
        if np.linalg.matrix_rank(R) == 0:
            R = np.reshape(R, (1, 1))

        # Keep track of the indices and the correlation weights
        # to construct sparse connectivity matrix
        sparse_i = np.append(sparse_i, ondx1d, 0)
        sparse_j = np.append(sparse_j, (ondx1d[nndx]) * np.ones(len(ondx1d)))
        sparse_w = np.append(sparse_w, R[nndx, :], 1)

    # Ensure that the weight vector is the correct shape
    sparse_w = np.reshape(sparse_w, np.prod(np.shape(sparse_w)))

    # Concatenate the i, j, and w_ij vectors
    outlist = sparse_i
    outlist = np.append(outlist, sparse_j)
    outlist = np.append(outlist, sparse_w)

    # Calculate the number of non-zero weights in the connectivity matrix
    n = len(outlist) / 3

    # Reshape the 1D vector read in from infile in to a 3xN array
    outlist = np.reshape(outlist, (3, int(n)))

    m = max(max(outlist[0, :]), max(outlist[1, :])) + 1

    # Make the sparse matrix, CSC format is supposedly efficient for matrix arithmetic
    W = csc_matrix((outlist[2, :], (outlist[0, :], outlist[1, :])), shape=(int(m), int(m)), dtype=np.float32)

    del imdat, msk, mskdat, outlist, m, sparse_i, sparse_j, sparse_w

    return W
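# A minimal usage sketch (hypothetical file names; any 4D BOLD image and a 3D
# binary mask in the same space will do):
import nibabel as nib

func_img = nib.load('func.nii.gz')
clust_mask_img = nib.load('mask.nii.gz')
W = make_local_connectivity_scorr(func_img, clust_mask_img, thresh=0.5)
print(W.shape, W.nnz)  # sparse (n_voxels, n_voxels) connectivity matrix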
Example #43
        raw_data = raw_data[:max_considered_chunks]
        feature_rows = feature_rows[:max_considered_chunks]
        start_ids = start_ids[:max_considered_chunks]
    num_chunks = feature_rows.shape[0]
    s = raw_data.shape
    raw_data = [raw_data.reshape([-1, subchunk_size, s[-1]])]
    encodings = encoder_model.predict(raw_data, verbose=1)
    enc_columns = ['enc_' + str(i) for i in range(encodings.shape[1])]
    encodings_df = pd.DataFrame(data=encodings,
                                index=np.arange(encodings.shape[0]),
                                columns=enc_columns).astype(float)

    # Create a data frame of the notrain features
    segment_ids = np.repeat(feature_data.notrain_seg_id.values[feature_rows],
                            num_sub_per_chunk)
    start_rows = np.tile(subchunk_size * np.arange(num_sub_per_chunk),
                         num_chunks)
    if source == 'test':
        targets = -999
        targets_original = -999
        eq_ids = -999
    else:
        start_rows += np.repeat(start_ids, num_sub_per_chunk)
        targets = np.interp(np.arange(0, num_chunks, 1 / num_sub_per_chunk),
                            np.arange(num_chunks),
                            feature_data.target.values[feature_rows])
        targets_original = locals()[signal_string].time_to_failure.values[
            start_rows + subchunk_size - 1]
        eq_ids = np.repeat(feature_data.notrain_eq_id.values[feature_rows],
                           num_sub_per_chunk)
    notrain_data = utils.ordered_dict([
        'target', 'notrain_target_original', 'notrain_seg_id', 'notrain_eq_id',
Example #44
def extract_genders(genders):
    return np.tile(np.reshape(genders, (-1, 1)), [1, 32])
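# np.broadcast_to produces the same (N, 32) result as the np.tile call above
# without copying the data. A minimal equivalent sketch:
def extract_genders_broadcast(genders):
    col = np.reshape(np.asarray(genders), (-1, 1))
    # Read-only broadcast view; append .copy() if the result will be mutated.
    return np.broadcast_to(col, (col.shape[0], 32))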
Example #45
    def track(self, img):
        w_z = self.size[0] + CONTEXT_AMOUNT * np.sum(self.size)
        h_z = self.size[1] + CONTEXT_AMOUNT * np.sum(self.size)
        s_z = np.sqrt(w_z * h_z)
        scale_z = EXEMPLAR_SIZE / s_z

        score_size = (INSTANCE_SIZE - EXEMPLAR_SIZE) // ANCHOR_STRIDE + 1 + BASE_SIZE
        hanning = np.hanning(score_size)
        window = np.outer(hanning, hanning)
        window = np.tile(window.flatten(), self.anchor_num)
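        # The np.tile above repeats the flattened cosine window once per
        # anchor so it lines up with the flattened per-anchor score vector.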
        anchors = self.generate_anchor(score_size)

        s_x = s_z * (INSTANCE_SIZE / EXEMPLAR_SIZE)
        x_crop = self.get_subwindow(img, self.center_pos,
                                    INSTANCE_SIZE,
                                    round(s_x), self.channel_average)
        
        outputs = self.model.track(x_crop)

        score = self._convert_score(outputs['cls'])
        pred_bbox = self._convert_bbox(outputs['loc'], anchors)

        def change(r):
            return np.maximum(r, 1. / r)

        def sz(w, h):
            pad = (w + h) * 0.5
            return np.sqrt((w + pad) * (h + pad))

        # scale penalty
        s_c = change(sz(pred_bbox[2, :], pred_bbox[3, :]) /
                     (sz(self.size[0]*scale_z, self.size[1]*scale_z)))

        # aspect ratio penalty
        r_c = change((self.size[0]/self.size[1]) /
                     (pred_bbox[2, :]/pred_bbox[3, :]))
        penalty = np.exp(-(r_c * s_c - 1) * PENALTY_K)
        pscore = penalty * score

        # window penalty
        pscore = pscore * (1 - WINDOW_INFLUENCE) + \
                 window * WINDOW_INFLUENCE

        # get 'best_score' and the most important 'scores' and 'boxes' and 'lr'
        best_idx = np.argmax(pscore)
        best_score = pscore[best_idx]

        best_idx16 = np.argsort(pscore)[::-1][:16] 
        best_idx16 = best_idx16[pscore[best_idx16] > pscore[best_idx]*0.95].tolist()
  
        bbox = pred_bbox[:, best_idx16] / scale_z
        lr = penalty[best_idx16] * score[best_idx16] * LR
        
        # get position and size
        if best_score >= 0.65:
            cx = bbox[0,0] + self.center_pos[0]
            cy = bbox[1,0] + self.center_pos[1]
            width = self.size[0] * (1 - lr[0]) + bbox[2,0] * lr[0]
            height = self.size[1] * (1 - lr[0]) + bbox[3,0] * lr[0]

            self.cx16 = bbox[0,:] + self.center_pos[0]
            self.cy16 = bbox[1,:] + self.center_pos[1]
            self.width16 = self.size[0] * (1 - lr) + bbox[2,:] * lr
            self.height16 = self.size[1] * (1 - lr) + bbox[3,:] * lr
        else:
            cx = self.center_pos[0]
            cy = self.center_pos[1]
            width = self.size[0]
            height = self.size[1]

            self.cx16 = np.array([cx])
            self.cy16 = np.array([cy])
            self.width16 = np.array([width])
            self.height16 = np.array([height])

        # clip boundary
        cx, cy, width, height = self._bbox_clip(cx, cy, width, height, img.shape[:2])

        # update state
        self.center_pos = np.array([cx, cy])
        self.size = np.array([width, height])

        bbox = [cx - width / 2,
                cy - height / 2,
                width,
                height]
        
        bbox16 = [self.cx16 - self.width16 / 2,
                  self.cy16 - self.height16 / 2,
                  self.width16,
                  self.height16]

        return {
                'bbox': bbox,
                'bbox16': bbox16,
                'best_score': best_score,
               }
Example #46
def get_w(dataMat, labelMat, alphas):
    # SVM weight vector from the dual variables: w = sum_i alpha_i * y_i * x_i
    alphas, dataMat, labelMat = np.array(alphas), np.array(dataMat), np.array(labelMat)
    w = np.dot((np.tile(labelMat.reshape(1, -1).T, (1, dataMat.shape[1])) * dataMat).T, alphas)
    return w.tolist()
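# The same weight vector follows from broadcasting, without np.tile. A minimal
# equivalent sketch:
def get_w_broadcast(dataMat, labelMat, alphas):
    X = np.asarray(dataMat)
    y = np.asarray(labelMat).ravel()
    a = np.asarray(alphas).ravel()
    # w = X^T (alpha * y): each sample x_i is weighted by alpha_i * y_i
    return X.T @ (a * y)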
Example #47
def train_and_evaluate(training_mode, graph, model, verbose=False):
    """Helper to run the model with different training modes."""

    with tf.Session(graph=graph) as sess:
        #summary_writer = tf_basic_trials.summary.FileWriter('tensorBorad_logs/log_IMDB_To_Amazon_movies_DANN_networks', sess.graph)
        sess.run(tf.global_variables_initializer())

        # Batch generators
        # Per the paper, source-domain samples come with known labels while target-domain labels are unknown
        gen_source_batch = batch_generator(
            [Amz_movies_x_train, Amz_movies_y_train], batch_size // 2)
        gen_target_batch = batch_generator(
            [tweet_x_train, tweet_y_train], batch_size // 2) # target labels are only carried along here; they are not used to train the label predictor
        gen_source_only_batch = batch_generator(
            [Amz_movies_x_train, Amz_movies_y_train], batch_size)
        gen_target_only_batch = batch_generator(
            [tweet_x_train, tweet_y_train], batch_size)

        domain_labels = np.vstack([np.tile([1., 0.], [batch_size // 2, 1]),
                                   np.tile([0., 1.], [batch_size // 2, 1])])
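        # Each np.tile above stacks batch_size // 2 copies of a one-hot row,
        # so the first half of every combined batch is labelled source
        # ([1, 0]) and the second half target ([0, 1]).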

        # Training loop
        for i in range(num_steps):

            # Adaptation param and learning rate schedule as described in the paper
            p = float(i) / num_steps
            l = 2. / (1. + np.exp(-10. * p)) - 1  # lambda
            lr = 0.01 / (1. + 10 * p)**0.75 #learning rate

            # Training step
            if training_mode == 'dann':

                X0, y0 = next(gen_source_batch)
                X1, y1 = next(gen_target_batch)
                X = np.vstack([X0, X1])
                y = np.vstack([y0, y1])


                _, batch_loss, dloss, ploss, d_acc, p_acc = sess.run([dann_train_op, total_loss, domain_loss, pred_loss, domain_acc, label_acc],
                             feed_dict={model.X: X, model.y: y, model.domain: domain_labels,
                                        model.train: True, model.l: l, learning_rate: lr})

                if verbose and i % 100 == 0:
                    print('loss: %f  d_acc: %f  p_acc: %f  p: %f  l: %f  lr: %f' % (batch_loss, d_acc, p_acc, p, l, lr))

            elif training_mode == 'source':
                X, y = next(gen_source_only_batch)
                _, batch_loss = sess.run([regular_train_op, pred_loss],
                                     feed_dict={model.X: X, model.y: y, model.train: False,
                                                model.l: l,learning_rate: lr})

            elif training_mode == 'target':
                X, y = next(gen_target_only_batch)
                _, batch_loss = sess.run([regular_train_op, pred_loss],
                                     feed_dict={model.X: X, model.y: y, model.train: False,
                                                model.l: l, learning_rate: lr})

        # Compute final evaluation on test data
        source_acc = sess.run(label_acc,
                            feed_dict={model.X: Amz_movies_x_test, model.y: Amz_movies_y_test,
                                       model.train: False})

        target_acc = sess.run(label_acc,
                            feed_dict={model.X: tweet_x_test, model.y: tweet_y_test,
                                       model.train: False})

        test_domain_acc = sess.run(domain_acc,
                            feed_dict={model.X: combined_test_txt,
                                       model.domain: combined_test_domain, model.l: 1.0})

        test_emb = sess.run(model.feature, feed_dict={model.X: combined_test_txt})

    return source_acc, target_acc, test_domain_acc, test_emb
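# A minimal call sketch, assuming graph and model are built as in the
# surrounding script:
#   source_acc, target_acc, test_domain_acc, test_emb = train_and_evaluate(
#       'dann', graph, model, verbose=True)
#   print('source: %.3f  target: %.3f  domain: %.3f'
#         % (source_acc, target_acc, test_domain_acc))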
Example #48
def similarity(FLAGS, sess, all_features, all_paths):
    def select_images(distances):
        if FLAGS.similarity_input is not None:
            input_path = str(os.path.dirname(FLAGS.similarity_input))

        indices = np.argsort(distances)
        images = []
        #size = 40
        size = 2000
        image_counter = 0
        for i in range(size):
            while FLAGS.similarity_input is not None and i != 0 and \
                    str(os.path.dirname(all_paths[indices[image_counter]]).decode(encoding='UTF-8')) == input_path:
                image_counter += 1
            images += [dict(path=all_paths[indices[image_counter]],
                            index=int(indices[image_counter]),
                            distance=float(distances[indices[image_counter]]))]
            image_counter += 1
        return images

    # Distance
    x1 = tf.placeholder(tf.float32, shape=[None, all_features.shape[1]])
    x2 = tf.placeholder(tf.float32, shape=[None, all_features.shape[1]])
    l2diff = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x1, x2)), axis=1))

    # Init
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)

    #
    clip = 1e-3
    np.clip(all_features, -clip, clip, all_features)

    # Get distances
    result = []
    bs = 100
    # print("choosing needles")
    # print("features size:")
    # print(str(all_features.shape[0]))
    # print("paths size:")
    # print(str(len(all_paths)))
    if FLAGS.similarity_input is None:
        needles = [randint(0, all_features.shape[0] - 1) for x in range(10)]  # randint is inclusive on both ends
    else:
        input_path = str(os.path.dirname(FLAGS.similarity_input))
        needles = [i for i in range(len(all_paths)) if
                   str(os.path.dirname(all_paths[i]).decode(encoding='UTF-8')) == input_path]

    for needle in needles:
        item_block = np.reshape(np.tile(all_features[needle], bs), [bs, -1])
        distances = np.zeros(all_features.shape[0])
        for i in range(0, all_features.shape[0], bs):
            if i + bs > all_features.shape[0]:
                bs = all_features.shape[0] - i
            distances[i:i + bs] = sess.run(
                l2diff, feed_dict={x1: item_block[:bs], x2: all_features[i:i + bs]})

        # Pick best matches
        result += [select_images(distances)]

    data_json = dict(data=result)
    with open('./data.json', 'w') as f:
        json.dump(data_json, f)

    return data_json
Example #49
def xpal_gain(K_c, K_x=None, S=None, A=None, alpha_x=1, alpha_c=1):
    """
    Computes the expected probabilistic gain.

    Parameters
    ----------
    K_c: array-like, shape (n_annotators, n_candidate_samples, n_classes)
        Kernel frequency estimate vectors of the candidate samples, one per annotator.
    K_x: array-like, shape (n_evaluation_samples, n_classes), optional (default=K_c)
        Kernel frequency estimate vectors of the evaluation samples.
    S: array-like, shape (n_candidate_samples, n_evaluation_samples), optional (default=np.eye(n_candidate_samples))
        Similarities between all pairs of candidate and evaluation samples.
    A: array-like, shape (n_candidate_samples, n_annotators), optional (default=np.ones((n_candidate_samples, n_annotators)))
        Label accuracy of each annotator for each candidate sample.
    alpha_x: array-like, shape (n_classes)
        Prior probabilities for the Dirichlet distribution of the samples in the evaluation set.
        Default is 1 for all classes.
    alpha_c: float | array-like, shape (n_classes)
        Prior probabilities for the Dirichlet distribution of the candidate samples.
        Default is 1 for all classes.

    Returns
    -------
    gains: numpy.ndarray, shape (n_candidate_samples)
        Computed expected gain for each candidate sample.
    """
    # check kernel frequency estimates of candidate samples
    n_annotators = K_c.shape[0]
    n_candidate_samples = K_c.shape[1]
    n_classes = K_c.shape[2]

    # check kernel frequency estimates of evaluation samples
    K_x = K_c if K_x is None else check_array(K_x)
    n_evaluation_samples = K_x.shape[0]
    if n_classes != K_x.shape[1]:
        raise ValueError("'K_x' and 'K_c' must have one column per class")

    # check similarity matrix
    S = np.eye(n_candidate_samples) if S is None else check_array(S)
    if S.shape[0] != n_candidate_samples or S.shape[1] != n_evaluation_samples:
        raise ValueError("'S' must have the shape (n_candidate_samples, n_evaluation_samples)")

    # check label accuracies
    A = np.ones((n_candidate_samples, n_annotators)) if A is None else check_array(A)
    if A.shape[0] != n_candidate_samples or A.shape[1] != n_annotators:
        raise ValueError("'A' must have the shape (n_candidate_samples, n_annotators)")

    # check prior parameters
    if hasattr(alpha_c, "__len__") and len(alpha_c) != n_classes:
        raise ValueError("'alpha_c' must be either a float > 0 or array-like with shape (n_classes)")
    if hasattr(alpha_x, "__len__") and len(alpha_x) != n_classes:
        raise ValueError("'alpha_x' must be either a float > 0 or array-like with shape (n_classes)")

    # uniform risk matrix
    R = 1 - np.eye(n_classes)

    # compute possible risk differences
    class_vector = np.arange(n_classes, dtype=int)
    R_diff = np.array([[R[:, y_hat] - R[:, y_hat_l] for y_hat_l in class_vector] for y_hat in class_vector])

    # compute current error per evaluation sample and class
    R_x = K_x @ R

    # compute current predictions
    y_hat = np.argmin(R_x, axis=1)

    # compute required labels per class to flip decision
    with np.errstate(divide='ignore', invalid='ignore'):
        D_x = np.nanmin(np.divide(R_x - np.min(R_x, axis=1, keepdims=True), R[:, y_hat].T), axis=1)
        D_x = np.tile(D_x, (len(S), 1))

    # indicates where a decision flip can be reached
    A_max = np.sum(A, axis=1, keepdims=True)
    I = D_x - A_max * S < 0
    print('#decision_flips: {}'.format(np.sum(I)))

    # compute normalization constants per candidate sample
    K_c_alpha_c_norm = K_c + alpha_c
    K_c_alpha_c_norm /= K_c_alpha_c_norm.sum(axis=2, keepdims=True)

    # stores gain per candidate sample
    gains = np.zeros((n_candidate_samples, n_annotators))

    # compute gain for each candidate sample
    flip_indices = np.argwhere(np.sum(I, axis=1) > 0)[:, 0]
    for ik_c in flip_indices:
        for m in range(n_annotators):
            for class_idx in range(n_classes):
                norm = K_c_alpha_c_norm[:m + 1, ik_c, class_idx].prod()
                l_vec = np.zeros(n_classes)
                l_vec[class_idx] = A[ik_c, :m + 1].sum()
                K_l = (S[ik_c, I[ik_c]] * l_vec[:, np.newaxis]).T
                K_new = K_x[I[ik_c]] + K_l
                y_hat_l = np.argmin(K_new @ R, axis=1)
                K_new += alpha_x
                K_new /= np.sum(K_new, axis=1, keepdims=True)
                gains[ik_c, m] += norm * np.sum(K_new * R_diff[y_hat[I[ik_c]], y_hat_l])

    # compute average gains over evaluation samples
    gains /= n_evaluation_samples
    gains /= np.arange(1, n_annotators + 1).reshape(1, -1)
    print(np.unravel_index(gains.argmax(), gains.shape))

    return np.max(gains, axis=1)
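# A minimal usage sketch with synthetic inputs (shapes follow the checks
# above; values are random and purely illustrative):
import numpy as np

rng = np.random.default_rng(0)
n_annotators, n_candidates, n_classes = 2, 5, 3
K_c = rng.random((n_annotators, n_candidates, n_classes))
K_x = rng.random((n_candidates, n_classes))
S = np.eye(n_candidates)                   # candidates evaluated on themselves
A = np.ones((n_candidates, n_annotators))  # perfectly accurate annotators
gains = xpal_gain(K_c, K_x=K_x, S=S, A=A)
print(gains.shape)  # (5,)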
Example #50
def reconstruct_time_series_partial(images_fn, gen, noise_shapes,
    init_model, out_fn,
    time_range, h=None, last_t=None, application="mchrzc", ds_factor=16, n_ensemble=4,
    scaling_fn=path+"/../data/scale_rzc.npy", relax_lam=0.0):

    if application == "mchrzc":
        dec = data.RainRateDecoder(scaling_fn, below_val=np.log10(0.025))
    else:
        raise ValueError("Unknown application.")
    downsampler = data.LogDownsampler(min_val=dec.below_val,
        threshold_val=dec.value_range[0])

    with netCDF4.Dataset(images_fn) as ds_img:
        time = np.array(ds_img["time"][:], copy=False)
        time_dt = [datetime(1970,1,1)+timedelta(seconds=t) for t in time]
        t0 = bisect_left(time_dt, time_range[0])
        t1 = bisect_left(time_dt, time_range[1])
        images = np.array(ds_img["images"][t0:t1,...], copy=False)
        time = time[t0:t1]

    img_shape = images.shape[1:3]
    img_shape = (
        img_shape[0] - img_shape[0]%ds_factor,
        img_shape[1] - img_shape[1]%ds_factor,
    )
    noise_gen = noise.NoiseGenerator(noise_shapes(img_shape),
        batch_size=n_ensemble)

    images_ds = np.zeros(
        (images.shape[0],img_shape[0]//ds_factor,img_shape[1]//ds_factor,1),
        dtype=np.uint8
    )
    images_gen = np.zeros(
        (images.shape[0],)+img_shape+(1,n_ensemble),
        dtype=np.uint8
    )

    # this finds the nearest index in the R encoding
    def encoder():
        lR = dec.logR
        ind = np.arange(len(lR))
        ip = interp1d(lR,ind)
        def f(x):
            y = np.zeros(x.shape, dtype=np.uint8)
            valid = (x >= dec.value_range[0])
            y[valid] = ip(x[valid]).round().astype(np.uint8)
            return y
        return f
    encode = encoder()

    for k in range(images.shape[0]):
        print("{}/{}".format(k+1,images.shape[0]))
        img_real = images[k:k+1,:img_shape[0],:img_shape[1],:]
        img_real = dec(img_real)
        img_real = img_real.reshape(
            (1,1)+img_real.shape[1:])
        img_real[np.isnan(img_real)] = dec.below_val
        img_ds = downsampler(img_real)
        img_ds = dec.normalize(img_ds)
        img_ds_denorm = dec.denormalize(img_ds)
        img_ds = np.tile(img_ds, (n_ensemble,1,1,1,1))

        (n_init, n_update) = noise_gen()
            
        if (h is None) or (time[k]-last_t != 600):
            h = init_model.predict([img_ds[:,0,...], n_init])
            
        (img_gen,h) = gen.predict([img_ds, h, n_update])
        if relax_lam > 0.0:
            # nudge h towards null
            h_null = init_model.predict([
                np.zeros_like(img_ds[:,0,...]), n_init
            ])
            h = h_null + (1.0-relax_lam)*(h-h_null)
        img_gen = dec.denormalize(img_gen)
        img_gen = img_gen.transpose((1,2,3,4,0))

        images_ds[k,...] = encode(img_ds_denorm[0,...])
        images_gen[k,...] = encode(img_gen[0,...])
        last_t = time[k]

    with netCDF4.Dataset(out_fn, 'w') as ds:
        dim_height = ds.createDimension("dim_height", img_shape[0])
        dim_width = ds.createDimension("dim_width", img_shape[1])
        dim_height_ds = ds.createDimension("dim_height_ds",
            img_shape[0]//ds_factor)
        dim_width_ds = ds.createDimension("dim_width_ds",
            img_shape[1]//ds_factor)
        dim_samples = ds.createDimension("dim_samples", images.shape[0])
        dim_ensemble = ds.createDimension("dim_ensemble", n_ensemble)
        dim_channels = ds.createDimension("dim_channels", 1)

        var_params = {"zlib": True, "complevel": 9}

        def create_var(name, dims, **params):
            dtype = params.pop("dtype", np.float32)
            var = ds.createVariable(name, dtype, dims, **params)
            return var

        var_img = create_var("images",
            ("dim_samples","dim_height","dim_width","dim_channels",
                "dim_ensemble"),
            chunksizes=(1,64,64,1,1), dtype=np.uint8, **var_params)
        var_img.units = "Encoded R"
        var_img_ds = create_var("images_ds",
            ("dim_samples","dim_height_ds","dim_width_ds","dim_channels"),
            dtype=np.uint8, **var_params)
        var_img_ds.units = "Encoded R"
        var_time = create_var("time", ("dim_samples",), 
            chunksizes=(1,), dtype=np.float64, **var_params)
        var_time.units = "Seconds since 1970-01-01 00:00"

        var_img_ds[:] = images_ds
        var_img[:] = images_gen
        var_time[:] = time

    return (h, last_t)
Example #51
             False)  # prevent opendrift from making a new dynamical landmask

###############################
# PARTICLE SEEDING
###############################
# generic point release location for test

# Define the starting position of the particles within the buffer of intertidal rocky shore on the West Coast of the North Island (NZ)
nb_parts = 1
points = np.loadtxt('./Release_centroid_nat_dist_paua.xyz',
                    delimiter='\t',
                    dtype=str)
plon = points[:, 0].astype(float)
plat = points[:, 1].astype(float)
tot_parts = nb_parts * len(plon)  # total number of particles released
plon = np.tile(plon, nb_parts)
plat = np.tile(plat, nb_parts)


# Define the release time. author: Calvin Quigley (19/04/2021)
def create_seed_times(start, end, delta):
    """
    Create seeding times at the given interval between start and end.
    """
    out = []
    start_t = start
    end_t = datetime.strptime(str(end), "%Y-%m-%d %H:%M:%S")
    while start_t < end_t:
        out.append(start_t)
        start_t += delta
    return out
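# A minimal call sketch: hourly seed times over one day (hypothetical dates):
#   times = create_seed_times(datetime(2021, 4, 19), datetime(2021, 4, 20),
#                             timedelta(hours=1))
#   len(times)  # -> 24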
Example #52
dev_sample_index = -1 * int(0.1 * float(len(Amz_movies_y)))

Amz_movies_x_train, Amz_movies_x_test = Amz_movies_x_shuffled[:dev_sample_index], Amz_movies_x_shuffled[dev_sample_index:]
Amz_movies_y_train, Amz_movies_y_test = Amz_movies_y_shuffled[:dev_sample_index], Amz_movies_y_shuffled[dev_sample_index:]

dev_sample_index = -1 * int(0.1 * float(len(tweets_y)))

tweet_x_train, tweet_x_test = tweets_x_shuffled[:dev_sample_index], tweets_x_shuffled[dev_sample_index:]
tweet_y_train, tweet_y_test = tweet_y_shuffled[:dev_sample_index], tweet_y_shuffled[dev_sample_index:]

# Create a mixed dataset for TSNE visualization
num_test = 500
combined_test_txt = np.vstack([Amz_movies_x_test[:num_test], tweet_x_test[:num_test]])
combined_test_labels = np.vstack([Amz_movies_y_test[:num_test], tweet_y_test[:num_test]]) # labels for the mixed test set: source first, then target
combined_test_domain = np.vstack([np.tile([1., 0.], [num_test, 1]), # domain labels: first half source ([1, 0])
        np.tile([0., 1.], [num_test, 1])])                          # second half target ([0, 1])

batch_size = 128

class MNISTModel(object):
    """Domain adaptation model (adapted from the MNIST DANN example)."""
    def __init__(self):
        self._build_model()

    def _build_model(self):
        self.X_length=Amz_movies_x_train.shape[1]
        self.y_length =Amz_movies_y_train.shape[1]
        self.X = tf.placeholder(tf.int32, [None, None], name="input_x")
        self.y = tf.placeholder(tf.float32, [None, 2], name="input_y")
        self.domain = tf.placeholder(tf.float32, [None, 2])
Example #53
    #zoom = (1 - (k / end))
    zoom = np.exp(-0.1 * k)

    x_min = (q - zoom * 2)
    x_max = (q + zoom)

    y_min = (w - zoom)
    y_max = (w + zoom)

    m = int(3 * res)
    n = int(2 * res)

    x = np.linspace(x_min, x_max, num=m).reshape((1, m))
    y = np.linspace(y_min, y_max, num=n).reshape((n, 1))

    C = np.tile(x, (n, 1)) + 1j * np.tile(y, (1, m))

    Z = np.zeros((n, m), dtype=complex)
    M = np.full((n, m), True, dtype=bool)
    N = np.zeros((n, m))

    for i in range(0, 100):
        Z[M] = Z[M] * Z[M] + C[M]
        M[np.abs(Z) > 2] = False
        # Count one more iteration for every point that has escaped
        N[np.logical_not(M)] += 1
        print(i)

    im = plt.figure(frameon=False)
    #im = plt.imshow(N, cmap="terrain_r",interpolation="none")
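    # For reference, the np.tile construction of C above is equivalent to a
    # single broadcast add: with x of shape (1, m) and y of shape (n, 1),
    #   C = x + 1j * y
    # broadcasts to the same (n, m) complex grid.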
Example #54
    def get_batch(self):
        if self.no_batch_left():
            # TODO Use Log!
            logger.error(
                "There is no batch left in " + self.name + ". Consider using iterators.begin() to rescan from " \
                                                           "the beginning of the iterator")
            return None
        input_batch = numpy.zeros((self.seq_length, self.minibatch_size) +
                                  tuple(self.data_dims)).astype(
                                      self.input_data_type)
        mask = numpy.zeros((self.seq_length, self.minibatch_size)).astype(
            theano.config.floatX) if self.use_mask else None

        if self.is_output_multilabel:
            output_batch = numpy.zeros((self.seq_length, self.minibatch_size) +
                                       tuple(self.label_dims)).astype(
                                           self.output_data_type)
        elif self.one_hot_label:
            output_batch = numpy.zeros(
                (self.seq_length,
                 self.minibatch_size)).astype(self.output_data_type)
        else:
            output_batch = numpy.zeros((self.seq_length, self.minibatch_size) +
                                       tuple(self.label_dims)).astype(
                                           self.output_data_type)

        data = None
        vid_ind_prev = -1
        for i in range(self.current_batch_size):
            batch_ind = self.current_batch_indices[i]

            start = self.frame_local_indices[batch_ind]
            frame_ind = self.frame_indices[batch_ind]
            vid_ind = self.video_indices[batch_ind]
            label = self.labels[batch_ind]
            length = self.lengths[batch_ind]
            end = start + self.seq_length * self.seq_skip

            # load data for current video
            if vid_ind != vid_ind_prev:
                data = h5py.File(
                    '%s/%s.h5' % (self.data, self.video_names[vid_ind]),
                    'r')[self.dataset_name]

            if length >= self.seq_length * self.seq_skip:
                input_batch[:, i, :] = data[start:end:self.seq_skip, :]
            else:
                n = 1 + int((length - 1) / self.seq_skip)
                input_batch[:n,
                            i, :] = data[start:start + length:self.seq_skip, :]
                input_batch[n:, i, :] = numpy.tile(
                    input_batch[n - 1, i, :],
                    (self.seq_length - n, ) + ((1, ) * len(self.data_dims)))

            if self.is_output_multilabel:
                output_batch[:, i, :] = numpy.tile(label, (self.seq_length, 1))
            elif self.one_hot_label:
                output_batch[:, i] = numpy.tile(label, (1, self.seq_length))
            else:
                output_batch[:, i, label] = 1.

            vid_ind_prev = vid_ind

        # only for testing, will change in the future
        if self.reshape:
            input_batch = input_batch.reshape([
                input_batch.shape[0], input_batch.shape[1],
                input_batch.shape[2],
                input_batch.shape[3] * input_batch.shape[4]
            ])
        # input_batch = input_batch.reshape([input_batch.shape[0], input_batch.shape[1], 49, 1024])
        # input_batch = input_batch.transpose((0,1,3,2))

        if self.use_mask:
            mask[:, :self.current_batch_size] = 1.
        input_batch = input_batch.astype(self.input_data_type)
        output_batch = output_batch.astype(self.output_data_type)

        if self.use_mask:
            return [input_batch, mask, output_batch]
        else:
            return [input_batch, output_batch]
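The short-clip branch of get_batch pads a sequence by tiling its last real frame; the same trick in isolation (shapes are arbitrary, not taken from the original data):

import numpy as np

seq_length, feat_dim = 8, 4
frames = np.random.randn(5, feat_dim)       # clip shorter than seq_length
batch = np.zeros((seq_length, feat_dim))

n = frames.shape[0]
batch[:n] = frames
# repeat the last available frame to fill the remaining timesteps
batch[n:] = np.tile(frames[n - 1], (seq_length - n, 1))
assert np.array_equal(batch[n:],
                      np.repeat(frames[-1][None, :], seq_length - n, axis=0))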
Example #55
    def __init__(self,
                 inpath,
                 word_vocab=None,
                 char_vocab=None,
                 POS_vocab=None,
                 NER_vocab=None,
                 label_vocab=None,
                 batch_size=60,
                 isShuffle=False,
                 isLoop=False,
                 isSort=True,
                 max_char_per_word=10,
                 max_sent_length=200,
                 max_hyp_length=100,
                 max_choice_length=None,
                 tolower=False,
                 gen_concat_mat=False,
                 gen_split_mat=False,
                 efficient=False,
                 random_seed=None):
        '''
        Data stream for question, passage and choice.
        isShuffle: whether to shuffle the ordering of all batches
        isLoop: whether to loop again when the stream ends
        isSort: whether to sort all samples by length (speeds up computation)
        tolower: lowercase sentences before vocabulary lookup
        gen_concat_mat: generate a concatenation matrix for the concatenated representation of question and choice
        gen_split_mat: generate a split matrix for splitting the q+c representation
        efficient: generate the passage representation only once per passage
        random_seed: seed for shuffling batches
        '''
        if random_seed is not None:
            np.random.seed(random_seed)
        if max_choice_length is None:
            max_choice_length = max_hyp_length
        # number of answer choices per question; not defined in the original
        # snippet -- assumed to be 4 to match the per-question grouping below
        num_options = 4
        instances = []
        infile = open(inpath, 'rt', encoding='utf-8')
        for line in infile:
            line = line.strip()
            if line.startswith('-'): continue
            items = re.split("\t", line)
            label = items[0]
            if tolower:
                sentence1 = items[1].lower()
                sentence2 = items[2].lower()
                sentence3 = items[3].lower()
            else:
                sentence1 = items[1]
                sentence2 = items[2]
                sentence3 = items[3]
            if label_vocab is not None:
                label_id = label_vocab.getIndex(label)
                if label_id >= label_vocab.vocab_size: label_id = 0
            else:
                label_id = int(label)
            word_idx_1 = word_vocab.to_index_sequence(sentence1)
            word_idx_2 = word_vocab.to_index_sequence(sentence2)
            word_idx_3 = word_vocab.to_index_sequence(sentence3)
            char_matrix_idx_1 = char_vocab.to_character_matrix(sentence1)
            char_matrix_idx_2 = char_vocab.to_character_matrix(sentence2)
            char_matrix_idx_3 = char_vocab.to_character_matrix(sentence3)
            if len(word_idx_1) > max_sent_length:
                word_idx_1 = word_idx_1[:max_sent_length]
                char_matrix_idx_1 = char_matrix_idx_1[:max_sent_length]
            if len(word_idx_2) > max_hyp_length:
                word_idx_2 = word_idx_2[:max_hyp_length]
                char_matrix_idx_2 = char_matrix_idx_2[:max_hyp_length]
            if len(word_idx_3) > max_choice_length:
                word_idx_3 = word_idx_3[:max_choice_length]
                char_matrix_idx_3 = char_matrix_idx_3[:max_choice_length]
            POS_idx_1 = None
            POS_idx_2 = None
            if POS_vocab is not None:
                POS_idx_1 = POS_vocab.to_index_sequence(items[4])
                if len(POS_idx_1) > max_sent_length:
                    POS_idx_1 = POS_idx_1[:max_sent_length]
                POS_idx_2 = POS_vocab.to_index_sequence(items[5])
                if len(POS_idx_2) > max_sent_length:
                    POS_idx_2 = POS_idx_2[:max_sent_length]

            NER_idx_1 = None
            NER_idx_2 = None
            if NER_vocab is not None:
                NER_idx_1 = NER_vocab.to_index_sequence(items[6])
                if len(NER_idx_1) > max_sent_length:
                    NER_idx_1 = NER_idx_1[:max_sent_length]
                NER_idx_2 = NER_vocab.to_index_sequence(items[7])
                if len(NER_idx_2) > max_sent_length:
                    NER_idx_2 = NER_idx_2[:max_sent_length]

            instances.append(
                (label, sentence1, sentence2, sentence3, label_id, word_idx_1,
                 word_idx_2, word_idx_3, char_matrix_idx_1, char_matrix_idx_2,
                 char_matrix_idx_3, POS_idx_1, POS_idx_2, NER_idx_1,
                 NER_idx_2))
        infile.close()

        # sort instances based on sentence length
        if isSort:
            instances = sorted(
                instances,
                key=lambda instance: (len(instance[5]), len(instance[6]),
                                      len(instance[7])))
        self.num_instances = len(instances)

        # distribute into different buckets
        batch_spans = make_batches(self.num_instances, batch_size)
        self.batches = []
        for batch_index, (batch_start, batch_end) in enumerate(batch_spans):
            label_batch = []
            sent1_batch = []
            sent2_batch = []
            sent3_batch = []
            label_id_batch = []
            word_idx_1_batch = []
            word_idx_2_batch = []
            word_idx_3_batch = []
            char_matrix_idx_1_batch = []
            char_matrix_idx_2_batch = []
            char_matrix_idx_3_batch = []
            sent1_length_batch = []
            sent2_length_batch = []
            sent3_length_batch = []
            sent1_char_length_batch = []
            sent2_char_length_batch = []
            sent3_char_length_batch = []
            split_mat_batch_q = None
            split_mat_batch_c = None
            concat_mat_batch = None

            POS_idx_1_batch = None
            if POS_vocab is not None: POS_idx_1_batch = []
            POS_idx_2_batch = None
            if POS_vocab is not None: POS_idx_2_batch = []

            NER_idx_1_batch = None
            if NER_vocab is not None: NER_idx_1_batch = []
            NER_idx_2_batch = None
            if NER_vocab is not None: NER_idx_2_batch = []

            for i in range(batch_start, batch_end):
                (label, sentence1, sentence2, sentence3, label_id, word_idx_1,
                 word_idx_2, word_idx_3, char_matrix_idx_1, char_matrix_idx_2,
                 char_matrix_idx_3, POS_idx_1, POS_idx_2, NER_idx_1,
                 NER_idx_2) = instances[i]
                if (not efficient) or i % num_options == 0:
                    sent1_batch.append(sentence1)
                    sent2_batch.append(sentence2)
                    word_idx_1_batch.append(word_idx_1)
                    word_idx_2_batch.append(word_idx_2)
                    char_matrix_idx_1_batch.append(char_matrix_idx_1)
                    char_matrix_idx_2_batch.append(char_matrix_idx_2)
                    sent1_length_batch.append(len(word_idx_1))
                    sent2_length_batch.append(len(word_idx_2))
                    sent1_char_length_batch.append([
                        len(cur_char_idx) for cur_char_idx in char_matrix_idx_1
                    ])
                    sent2_char_length_batch.append([
                        len(cur_char_idx) for cur_char_idx in char_matrix_idx_2
                    ])

                sent3_batch.append(sentence3)
                label_batch.append(label)
                label_id_batch.append(label_id)
                word_idx_3_batch.append(word_idx_3)
                char_matrix_idx_3_batch.append(char_matrix_idx_3)
                sent3_length_batch.append(len(word_idx_3))
                sent3_char_length_batch.append(
                    [len(cur_char_idx) for cur_char_idx in char_matrix_idx_3])

                # if POS_vocab is not None:
                #     POS_idx_1_batch.append(POS_idx_1)
                #     POS_idx_2_batch.append(POS_idx_2)

                # if NER_vocab is not None:
                #     NER_idx_1_batch.append(NER_idx_1)
                #     NER_idx_2_batch.append(NER_idx_2)
            cur_batch_size = len(label_batch)
            if cur_batch_size == 0: continue

            if efficient:
                num_questions = cur_batch_size // num_options
                idx_list = []
                for optid in range(num_options):
                    for qid in range(num_questions):
                        idx_list.append(qid * num_options + optid)
                sent3_batch = [sent3_batch[i] for i in idx_list]
                label_batch = [label_batch[i] for i in idx_list]
                label_id_batch = [label_id_batch[i] for i in idx_list]
                word_idx_3_batch = [word_idx_3_batch[i] for i in idx_list]
                char_matrix_idx_3_batch = [
                    char_matrix_idx_3_batch[i] for i in idx_list
                ]
                sent3_length_batch = [sent3_length_batch[i] for i in idx_list]
                sent3_char_length_batch = [
                    sent3_char_length_batch[i] for i in idx_list
                ]

            # padding
            max_sent1_length = np.max(sent1_length_batch)
            max_sent2_length = np.max(sent2_length_batch)
            max_sent3_length = np.max(sent3_length_batch)

            max_char_length1 = np.max(
                [np.max(aa) for aa in sent1_char_length_batch])
            if max_char_length1 > max_char_per_word:
                max_char_length1 = max_char_per_word

            max_char_length2 = np.max(
                [np.max(aa) for aa in sent2_char_length_batch])
            if max_char_length2 > max_char_per_word:
                max_char_length2 = max_char_per_word

            max_char_length3 = np.max(
                [np.max(aa) for aa in sent3_char_length_batch])
            if max_char_length3 > max_char_per_word:
                max_char_length3 = max_char_per_word

            label_id_batch = np.array(label_id_batch)
            word_idx_1_batch = pad_2d_matrix(word_idx_1_batch,
                                             max_length=max_sent1_length)
            word_idx_2_batch = pad_2d_matrix(word_idx_2_batch,
                                             max_length=max_sent2_length)
            word_idx_3_batch = pad_2d_matrix(word_idx_3_batch,
                                             max_length=max_sent3_length)

            char_matrix_idx_1_batch = pad_3d_tensor(
                char_matrix_idx_1_batch,
                max_length1=max_sent1_length,
                max_length2=max_char_length1)
            char_matrix_idx_2_batch = pad_3d_tensor(
                char_matrix_idx_2_batch,
                max_length1=max_sent2_length,
                max_length2=max_char_length2)
            char_matrix_idx_3_batch = pad_3d_tensor(
                char_matrix_idx_3_batch,
                max_length1=max_sent3_length,
                max_length2=max_char_length3)

            sent1_length_batch = np.array(sent1_length_batch)
            sent2_length_batch = np.array(sent2_length_batch)
            sent3_length_batch = np.array(sent3_length_batch)

            if gen_concat_mat:
                if efficient:
                    tiled_sent2_length_batch = np.tile(sent2_length_batch,
                                                       num_options)
                else:
                    tiled_sent2_length_batch = sent2_length_batch
                concat_mat_batch, _ = gen_concat_indx_mat(
                    tiled_sent2_length_batch, sent3_length_batch)
                if gen_split_mat:
                    split_mat_batch_q, split_mat_batch_c = gen_split_indx_mat(
                        tiled_sent2_length_batch, sent3_length_batch)

            sent1_char_length_batch = pad_2d_matrix(
                sent1_char_length_batch, max_length=max_sent1_length)
            sent2_char_length_batch = pad_2d_matrix(
                sent2_char_length_batch, max_length=max_sent2_length)
            sent3_char_length_batch = pad_2d_matrix(
                sent3_char_length_batch, max_length=max_sent3_length)

            if POS_vocab is not None:
                POS_idx_1_batch = pad_2d_matrix(POS_idx_1_batch,
                                                max_length=max_sent1_length)
                POS_idx_2_batch = pad_2d_matrix(POS_idx_2_batch,
                                                max_length=max_sent2_length)
            if NER_vocab is not None:
                NER_idx_1_batch = pad_2d_matrix(NER_idx_1_batch,
                                                max_length=max_sent1_length)
                NER_idx_2_batch = pad_2d_matrix(NER_idx_2_batch,
                                                max_length=max_sent2_length)

            self.batches.append(
                (label_batch, sent1_batch, sent2_batch, sent3_batch,
                 label_id_batch, word_idx_1_batch, word_idx_2_batch,
                 word_idx_3_batch, char_matrix_idx_1_batch,
                 char_matrix_idx_2_batch, char_matrix_idx_3_batch,
                 sent1_length_batch, sent2_length_batch, sent3_length_batch,
                 sent1_char_length_batch, sent2_char_length_batch,
                 sent3_char_length_batch, POS_idx_1_batch, POS_idx_2_batch,
                 NER_idx_1_batch, NER_idx_2_batch, concat_mat_batch,
                 split_mat_batch_q, split_mat_batch_c))

        instances = None
        self.num_batch = len(self.batches)
        self.index_array = np.arange(self.num_batch)
        self.isShuffle = isShuffle
        if self.isShuffle: np.random.shuffle(self.index_array)
        self.isLoop = isLoop
        self.cur_pointer = 0
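For reference, the idx_list permutation used in efficient mode above turns a question-major batch into option-major order; a small sketch showing the equivalent reshape/transpose (num_options = 4 here matches the grouping assumed in the code):

import numpy as np

num_options, num_questions = 4, 3
items = np.arange(num_options * num_questions)  # question-major order
idx_list = [q * num_options + opt
            for opt in range(num_options)
            for q in range(num_questions)]
# the same option-major permutation expressed with reshape/transpose
alt = items.reshape(num_questions, num_options).T.reshape(-1)
assert np.array_equal(items[idx_list], alt)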
Example #56
    def explain(self, data, feature_types, max_ite=20, operator="set_average"):
        '''
        :param data: data matrix, one observation per row
        :param feature_types: per-column type codes; -1 marks a continuous
        feature, while the columns of one categorical feature all share the
        same integer, starting from 0 and in ascending order
        :param max_ite: integer, number of iterations used to search for explanations
        :param operator: string, explanation operator ("set_zero" or "set_average")
        :return: list of explanations (lists of feature indices) per observation
        '''
        all_explanations = []
        def_values = np.zeros(data.shape[1])
        # maximum integer of feature types
        maximum_feature_types = feature_types[-1]
        # number of continuous features
        num_of_continuous = sum(feature_types == -1)
        # number of discrete/categorical features
        num_of_discrete = maximum_feature_types + 1
        num_of_features = num_of_continuous + num_of_discrete
        # Init def_modes for discrete features
        def_modes = np.arange(num_of_discrete)
        indices_list = []
        # assign def_modes values
        for i in range(num_of_discrete):
            indices = np.where(feature_types == i)[0]
            indices_list.append(indices)
            max_count = 0
            max_idx = indices[0]  # fall back to the first column of the category
            for j in range(len(indices)):
                cur_count = sum(data[:, indices[j]])
                if cur_count > max_count:
                    max_count = cur_count
                    max_idx = indices[j]
            def_modes[i] = max_idx
        # Get default values
        if operator == "set_zero":
            # set zero for continuous values but mode for categorical values
            def_values = np.zeros(data.shape[1])
        elif operator == "set_average":
            # set mean for continuous values and mode for categorical values
            def_values = np.squeeze(np.asarray(np.average(data, axis=0)))
        else:
            raise ValueError('Unsupported operator: {0}.'.format(operator))
        for obs in data:
            obs = obs.reshape(1, -1)
            score = self.score_f(obs)[0, 1] - self.threshold
            # Get class of the observation
            class_val = 1 if score >= 0 else -1
            # Get relevant features to apply operators (all features are relevant here)
            relevant_f = np.arange(num_of_features)
            # Set lists of explanations
            explanations = np.zeros((0, num_of_features))
            e_list = []
            if class_val == 1 or not self.omit_default:
                # Set first combination with no operators applied
                combs = [np.full(num_of_features, False, dtype=bool)]
                # Set list of scores
                scores = [score * class_val]
                for _ in range(max_ite):
                    # Check if there are any more explanations
                    if not combs:
                        break
                    # Get next combination with the smallest score
                    comb = combs.pop(0)
                    score = scores.pop(0)
                    # Add to list of explanations if the class changed
                    if score < 0:
                        if self.prune:
                            comb = self.prune_explanation(obs, comb, def_values, def_modes, indices_list, relevant_f)
                        explanations = np.vstack((explanations, comb))
                        e_list.append(relevant_f[comb == 1].tolist())

                    else:
                        # Get possible features to apply operator
                        active_f = np.where(np.logical_not(comb))[0]
                        # Build new possible combinations (one for each operator application)
                        new_combs = np.tile(comb, (active_f.size, 1))
                        new_combs[np.arange(active_f.size), active_f] = True
                        # Remove combinations that are a superset of an explanation.
                        matches = new_combs.dot(explanations.T) - explanations.sum(axis=1)
                        are_superset = np.unique(np.where(matches >= 0)[0])
                        new_combs = np.delete(new_combs, are_superset, axis=0)
                        active_f = np.delete(active_f, are_superset, axis=0)
                        if new_combs.shape[0] == 0:
                            continue
                        # Predict scores for new combs and add them to list
                        new_obs = np.tile(obs, (new_combs.shape[0], 1))
                        # def_value_tiles = np.tile(def_values[relevant_f], (new_combs.shape[0], 1))
                        # new_obs[:, relevant_f] = np.multiply(1 - new_combs, new_obs[:, relevant_f]) + \
                        #                          np.multiply(new_combs, def_value_tiles)
                        for k in range(active_f.size):
                            if active_f[k] < num_of_continuous:
                                new_obs[k, active_f[k]] = def_values[active_f[k]]
                            else:
                                cur_index = int(active_f[k] - num_of_continuous)
                                new_obs[k, indices_list[cur_index]] = 0
                                new_obs[k, def_modes[cur_index]] = 1
                        new_scores = (self.score_f(new_obs) - self.threshold)[:, 1] * class_val
                        for j, new_score in enumerate(new_scores):
                            ix = bisect.bisect(scores, new_score)
                            scores.insert(ix, new_score)
                            combs.insert(ix, new_combs[j, :])
            all_explanations.append(e_list)
        return all_explanations
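The superset pruning inside explain relies on the fact that, for 0/1 vectors, comb · explanation equals explanation.sum() exactly when comb switches on every feature of that explanation; a self-contained check:

import numpy as np

explanations = np.array([[1, 0, 1, 0]])   # one known explanation
new_combs = np.array([[1, 1, 1, 0],       # superset of it -> pruned
                      [0, 1, 0, 1]])      # unrelated      -> kept
matches = new_combs.dot(explanations.T) - explanations.sum(axis=1)
are_superset = np.unique(np.where(matches >= 0)[0])
assert list(are_superset) == [0]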
Example #57
import numpy as np
import pandas as pd
from scipy.special import expit  # logistic sigmoid used below


def yolo_output_to_box(y_pred, dict_in):
    # decodes raw network output into box centres/sizes, confidence scores
    # and class predictions; no ground truth or loss is involved
    # only handles one image threshold/config at the moment

    n_bat = y_pred.shape[0]
    # n_bat = int(dict_in['batch_size'])
    boxsx = y_pred.shape[2]
    # boxsx = int(dict_in['boxs_x'])
    boxsy = y_pred.shape[1]
    # boxsy = int(dict_in['boxs_y'])
    anchors = dict_in['anchors']
    nanchors = anchors.shape[0]
    num_out = int(y_pred.shape[3] / nanchors)
    n_classes = num_out - 5
    # n_classes = int(dict_in['n_classes'])
    thresh = dict_in['threshold']

    # size of all boxes anchors and data
    size1 = [n_bat, boxsy, boxsx, nanchors, num_out]
    # size of all boxes and anchors
    size2 = [n_bat, boxsy, boxsx, nanchors]
    # number of boxes in each direction used for calculations rather than sizing so x first
    size3 = [boxsx, boxsy]

    print("size1",size1)

    # get top left position of cells
    rowz = np.divide(np.arange(boxsy), boxsy)
    colz = np.divide(np.arange(boxsx), boxsx)
    rowno = np.reshape(np.repeat(np.repeat(rowz, boxsx * nanchors), n_bat), (n_bat, boxsy, boxsx, nanchors))
    colno = np.reshape(np.repeat(np.tile(np.repeat(colz, nanchors), boxsy), n_bat), (n_bat, boxsy, boxsx, nanchors))
    tl_cell = np.stack((colno, rowno), axis=4)

    print("y_in", y_pred[0,0,0,:])
    # restructure net_output
    y_pred = np.reshape(y_pred, size1)
    print("shape2",expit(y_pred[0,0,0,:]))

    # get confidences, centres, sizes and class predictions from net_output
    confs_cnn = expit(np.reshape(y_pred[:, :, :, :, 4], size2))
    cent_cnn = expit(y_pred[:, :, :, :, 0:2])
    # cent_cnn_in = cent_cnn
    # add to cent_cnn so is position in whole image
    cent_cnn = np.add(cent_cnn, tl_cell)
    # divide so position is relative to whole image
    cent_cnn = np.divide(cent_cnn, size3)

    size_cnn = y_pred[:, :, :, :, 2:4]
    # size is to power of prediction
    size_cnn = np.exp(size_cnn)
    # keep for loss
    # size_cnn_in = size_cnn
    # adjust so size is relative to anchors
    size_cnn = np.multiply(size_cnn, anchors)
    # adjust so size is relative to whole image
    size_cnn = np.divide(size_cnn, size3)
    class_cnn = expit(y_pred[:, :, :, :, 5:])

    boxes_out = pd.DataFrame(columns=['xc', 'yc', 'wid', 'hei'])
    scores_out = []
    classes_out = []

    print("max", np.max(confs_cnn))

    for img in range(n_bat):
        for yc in range(boxsy):
            for xc in range(boxsx):
                for ab in range(nanchors):
                    #print(confs_cnn[img, yc, xc, ab])
                    if confs_cnn[img, yc, xc, ab] > thresh:
                        boxes_out.loc[len(boxes_out)] = [cent_cnn[img, yc, xc, ab, 0], cent_cnn[img, yc, xc, ab, 1],
                                                         size_cnn[img, yc, xc, ab, 0], size_cnn[img, yc, xc, ab, 1]]
                        scores_out.append(confs_cnn[img, yc, xc, ab])
                        class_out = np.argmax(class_cnn[img, yc, xc, ab, :])
                        classes_out.append(class_out)

    output = [boxes_out, scores_out, classes_out]

    return output
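The repeat/tile gymnastics that build tl_cell above can be hard to follow; here is the same per-cell top-left offset grid for a single image (batch dimension omitted, sizes arbitrary):

import numpy as np

boxsy, boxsx, nanchors = 2, 3, 2
# per-cell offsets normalised to [0, 1), as in the function above
rowz = np.arange(boxsy) / boxsy
colz = np.arange(boxsx) / boxsx
colno = np.tile(colz[None, :, None], (boxsy, 1, nanchors))
rowno = np.tile(rowz[:, None, None], (1, boxsx, nanchors))
tl_cell = np.stack((colno, rowno), axis=3)   # (boxsy, boxsx, nanchors, 2)
assert tl_cell.shape == (boxsy, boxsx, nanchors, 2)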
Example #58
    def melspec(y,
                fr,
                nfft,
                frame_size=0.100,
                frame_stride=0.050,
                basicFreq=440,
                progressbarObject=None):
        if progressbarObject:
            progressbarObject.setProperty("indeterminate", True)
            progressbarObject.setProperty("value", 0.0)
        import numpy
        NFFT = nfft
        sample_rate = fr
        nfilt = 128
        # algorithm from https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
        frame_length, frame_step = frame_size * sample_rate, frame_stride * \
            sample_rate  # Convert from seconds to samples
        signal_length = len(y)
        frame_length = int(round(frame_length))
        frame_step = int(round(frame_step))
        # Make sure that we have at least 1 frame
        num_frames = int(
            numpy.ceil(
                float(numpy.abs(signal_length - frame_length)) / frame_step))

        pad_signal_length = num_frames * frame_step + frame_length
        z = numpy.zeros((pad_signal_length - signal_length))
        # Pad Signal to make sure that all frames have equal number of samples without truncating any samples from the original signal
        pad_signal = numpy.append(y, z)

        indices = numpy.tile(numpy.arange(
            0, frame_length), (num_frames, 1)) + numpy.tile(
                numpy.arange(0, num_frames * frame_step, frame_step),
                (frame_length, 1)).T
        frames = pad_signal[indices.astype(numpy.int32, copy=False)]
        frames *= numpy.hamming(frame_length)
        mag_frames = numpy.absolute(numpy.fft.rfft(
            frames, NFFT))  # Magnitude of the FFT
        pow_frames = ((1.0 / NFFT) * ((mag_frames)**2))  # Power Spectrum
        # --------
        hz_points = numpy.array([0] + libs.const.pitch +
                                [22050]) * basicFreq / 440
        bin = numpy.floor((NFFT + 1) * hz_points / sample_rate)
        fbank = numpy.zeros((nfilt, int(numpy.floor(NFFT / 2 + 1))))
        for m in range(1, nfilt + 1):
            f_m_minus = int(bin[m - 1])  # left
            f_m = int(bin[m])  # center
            f_m_plus = int(bin[m + 1])  # right

            for k in range(f_m_minus, f_m):
                fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])
            for k in range(f_m, f_m_plus):
                fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])
        filter_banks = numpy.dot(pow_frames, fbank.T)
        filter_banks = numpy.where(filter_banks == 0,
                                   numpy.finfo(float).eps,
                                   filter_banks)  # Numerical Stability
        filter_banks = filter_banks / nfft * 2.6e7
        # filter_banks = 20 * numpy.log10(filter_banks)  # dB
        if progressbarObject:
            progressbarObject.setProperty("indeterminate", False)
            progressbarObject.setProperty("value", 1.0)
        return filter_banks
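The indices matrix in melspec is the standard numpy.tile framing trick; isolated here with tiny numbers so the hop overlap is visible:

import numpy as np

frame_length, frame_step, num_frames = 4, 2, 3
# row r of `indices` holds the sample positions of frame r, exactly as in melspec
indices = (np.tile(np.arange(frame_length), (num_frames, 1))
           + np.tile(np.arange(0, num_frames * frame_step, frame_step),
                     (frame_length, 1)).T)
signal = np.arange(10.0)
frames = signal[indices]
assert np.array_equal(frames[1], signal[2:6])  # second frame starts at hop 2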
Example #59
    def setup_method(self, method):
        np.random.seed(11235)
        nanops._USE_BOTTLENECK = False

        arr_shape = (11, 7, 5)

        self.arr_float = np.random.randn(*arr_shape)
        self.arr_float1 = np.random.randn(*arr_shape)
        self.arr_complex = self.arr_float + self.arr_float1 * 1j
        self.arr_int = np.random.randint(-10, 10, arr_shape)
        self.arr_bool = np.random.randint(0, 2, arr_shape) == 0
        self.arr_str = np.abs(self.arr_float).astype("S")
        self.arr_utf = np.abs(self.arr_float).astype("U")
        self.arr_date = np.random.randint(0, 20000, arr_shape).astype("M8[ns]")
        self.arr_tdelta = np.random.randint(0, 20000,
                                            arr_shape).astype("m8[ns]")

        self.arr_nan = np.tile(np.nan, arr_shape)
        self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
        self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
        self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
        self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])

        self.arr_inf = self.arr_float * np.inf
        self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])

        self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
        self.arr_float_nan_inf = np.vstack(
            [self.arr_float, self.arr_nan, self.arr_inf])
        self.arr_nan_nan_inf = np.vstack(
            [self.arr_nan, self.arr_nan, self.arr_inf])
        self.arr_obj = np.vstack([
            self.arr_float.astype("O"),
            self.arr_int.astype("O"),
            self.arr_bool.astype("O"),
            self.arr_complex.astype("O"),
            self.arr_str.astype("O"),
            self.arr_utf.astype("O"),
            self.arr_date.astype("O"),
            self.arr_tdelta.astype("O"),
        ])

        with np.errstate(invalid="ignore"):
            self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
            self.arr_complex_nan = np.vstack(
                [self.arr_complex, self.arr_nan_nanj])

            self.arr_nan_infj = self.arr_inf * 1j
            self.arr_complex_nan_infj = np.vstack(
                [self.arr_complex, self.arr_nan_infj])

        self.arr_float_2d = self.arr_float[:, :, 0]
        self.arr_float1_2d = self.arr_float1[:, :, 0]

        self.arr_nan_2d = self.arr_nan[:, :, 0]
        self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
        self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
        self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]

        self.arr_float_1d = self.arr_float[:, 0, 0]
        self.arr_float1_1d = self.arr_float1[:, 0, 0]

        self.arr_nan_1d = self.arr_nan[:, 0, 0]
        self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
        self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
        self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
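A side note on arr_nan above: np.tile with a scalar simply replicates it to the requested shape, for which np.full is the more direct spelling; a quick equivalence check:

import numpy as np

arr_shape = (11, 7, 5)
arr_nan = np.tile(np.nan, arr_shape)
assert arr_nan.shape == arr_shape
# same all-NaN array, written directly
assert np.isnan(arr_nan).all() and np.isnan(np.full(arr_shape, np.nan)).all()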
Example #60
def do_hough(image, inner_radius, outer_radius, steps, org_centers=None, method='prewitt', save=False,
             dhtr=10, normalize=False, verbose=False, Otsu=None, threshold=0.15):
    '''
    Calculates the position and radius of the solar disk in a set of input images using the Hough transform.

    Parameters
    ----------
    image : (K, N, M) ndarray
        List or numpy array of K grayscale images of NxM size.
    inner_radius : int
        Minimum search radius
    outer_radius : int
        Maximum search radius
    steps: int
        Number of steps to look for the solar radius.
        steps is used to generate:
            (1): coarse find jumps: np.linspace(inner_radius, outer_radius, steps)
            (2): width of the ring for cross-correlating the disk: (outer_radius - inner_radius)//steps * 2
            (3): if steps is a negative number then the FM find model is used
                -#-
                4 iterations
                1] inner_radius = 152;  outer_radius = 1048; steps = 64; 15 iterations
                152_____________600_____________1048
                --|---|---|---|---|---|---|---|---|---|---|---|---|---|---|--
                2] inner_radius = Prev.Radius-32;  outer_radius = Prev.Radius+32; steps = 16; 5 iterations
                ---------|---------------|---------------|---------------|---------------|--------
                3] inner_radius = Prev.Radius-8;  outer_radius = Prev.Radius+8; steps = 4; 5 iterations
                -----------|---------------|---------------|---------------|---------------|-----------
                4] inner_radius = Prev.Radius-2;  outer_radius = Prev.Radius+2; steps = 1; 5 iterations
                -----------|---------------|---------------|---------------|---------------|-----------
                -#-
    org_centers = org_centers: numpy array [K,2] of centers for comparison (they are not used in the fit)
    method = method: method for finding the limb boundary. default = 'prewitt'
        for more info see FindEdges()
    save = False: save the centers as 'hough_centers.txt' -> ASCII (centers_fine, radii_fine)
    dhtr = 10:
    normalize = False:
    verbose = False:
    Otsu = None:
    threshold = 0.15:

    Returns
    -------
    centers : numpy int array of [K,2] elements where [i,0] = x-centers and [i,1] = y-centers
    radius : numpy int array of [K] elements containing the radius of the K images in pixels

    Raises
    ------

    References
    ----------
    [1] C. Hollitt, Machine Vision and Applications (2013) 24:683–694 DOI 10.1007/s00138-012-0420-x

    Examples
    --------
    >>> import SPGPylibs as spg

    Notes
    -----
    '''
    imsize = image[0].shape
    n_images = len(image)
    if org_centers is None:
        org_centers = np.tile(np.array([0., 0.], dtype=np.int16), (n_images, 1))
  
    ############################
    #Normalize images (using a box 100x100 in the central image)
    ############################

    if normalize:
        # 100x100 box around the image centre (imsize[0] rows, imsize[1] columns;
        # the original indexed both axes with imsize[0], which only works for
        # square images)
        norma = np.mean(image[0][imsize[0]//2-100:imsize[0]//2+100,
                                 imsize[1]//2-100:imsize[1]//2+100])
        if verbose:
            print('Normalization constant: ', norma, '[calculated with first image assumed to be central one]')

        for i in range(n_images):
            image[i] = image[i]/norma

    ############################
    #CALCULATE THE MASK GRADIENT FOR EACH IMAGE
    ############################

    binmask = []
    image_dummy, threshold = FindEdges(
        image[0], threshold, method=method, dthr=dhtr, verbose=verbose,Otsu=Otsu)
    binmask.append(image_dummy)

    for i in range(1, n_images):
        image_dummy = FindEdges(
            image[i], threshold, method=method, verbose=verbose,Otsu=Otsu)
        binmask.append(image_dummy)

    ############################
    #FIND CENTERS - COARSE SEARCH
    ############################
    #Coarse and fine compressed in one call

    
    if steps > 0:
        ############################# 
        #FIND CENTERS - COARSE SEARCH
        #############################
        r_width = (outer_radius - inner_radius)//steps * 2
        print(np.linspace(inner_radius, outer_radius, steps + 1))

        printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
        centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
        print('Image |   Original  |  Inferred   |   Radius')
        for i in range(n_images):
            printc("  %2.0f  | (%4.0f,%4.0f) | (%4.0f,%4.0f) |  %6.2f" %
                (i, org_centers[i, 0], org_centers[i, 1],
                centers[i][1], centers[i][0], radius[i]),color = bcolors.FAIL)
        ###########################
        #FIND CENTERS - FINE SEARCH
        ###########################
        mean_r = int(np.mean(radius))
        inner_radius = mean_r - 32
        outer_radius = mean_r + 32
        steps = 16
        r_width = (outer_radius - inner_radius)//steps * 2
        print(np.linspace(inner_radius, outer_radius, steps + 1))

        printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
        centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
        print('Image |   Original  |  Inferred   |   Radius')
        for i in range(n_images):
            printc("  %2.0f  | (%4.0f,%4.0f) | (%4.0f,%4.0f) |  %6.2f" %
                (i, org_centers[i, 0], org_centers[i, 1],
                centers[i][1], centers[i][0], radius[i]),color = bcolors.FAIL)
        ################################
        #FIND CENTERS - VERY FINE SEARCH
        ################################
        mean_r = int(np.mean(radius))
        inner_radius = mean_r - 4
        outer_radius = mean_r + 4
        steps = 8
        r_width = (outer_radius - inner_radius)//steps * 2
        print(np.linspace(inner_radius, outer_radius, steps + 1))

        printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
        centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
        print('Image |   Original  |  Inferred   |   Radius')
        for i in range(n_images):
            printc("  %2.0f  | (%4.0f,%4.0f) | (%4.0f,%4.0f) |  %6.2f" %
                (i, org_centers[i, 0], org_centers[i, 1],
                centers[i][1], centers[i][0], radius[i]),color = bcolors.FAIL)
    elif steps < 0:
        ##################################
        #FIND CENTERS - FM SEARCH STRATEGY
        ##################################
        inner_radius = 128
        outer_radius = 1024
        steps = 32
        r_width = (outer_radius - inner_radius)//steps * 2
        print(np.linspace(inner_radius, outer_radius, steps + 1))
        printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
        centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
        print('Image |   Original  |  Inferred   |   Radius')
        for i in range(n_images):
            print("  %2.0f  | (%4.0f,%4.0f) | (%4.0f,%4.0f) |  %6.2f" %
                (i, org_centers[i, 0], org_centers[i, 1],
                centers[i][1], centers[i][0], radius[i]))
        mean_r = int(np.mean(radius))
        inner_radius = mean_r - 32
        outer_radius = mean_r + 32
        steps = 16 
        r_width = (outer_radius - inner_radius)//steps * 2
        print(np.linspace(inner_radius, outer_radius, steps + 1))
        printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
        centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
        print('Image |   Original  |  Inferred   |   Radius')
        for i in range(n_images):
            print("  %2.0f  | (%4.0f,%4.0f) | (%4.0f,%4.0f) |  %6.2f" %
                (i, org_centers[i, 0], org_centers[i, 1],
                centers[i][1], centers[i][0], radius[i]))
        mean_r = int(np.mean(radius))
        inner_radius = mean_r - 8
        outer_radius = mean_r + 8
        steps = 8 
        r_width = (outer_radius - inner_radius)//steps * 2
        print(np.linspace(inner_radius, outer_radius, steps + 1))
        printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
        centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
        print('Image |   Original  |  Inferred   |   Radius')
        for i in range(n_images):
            print("  %2.0f  | (%4.0f,%4.0f) | (%4.0f,%4.0f) |  %6.2f" %
                (i, org_centers[i, 0], org_centers[i, 1],
                centers[i][1], centers[i][0], radius[i]))
        mean_r = int(np.mean(radius))
        inner_radius = mean_r - 2
        outer_radius = mean_r + 2
        steps = 4
        r_width = (outer_radius - inner_radius)//steps * 2
        print(np.linspace(inner_radius, outer_radius, steps + 1))
        printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
        centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
        print('Image |   Original  |  Inferred   |   Radius')
        for i in range(n_images):
            print("  %2.0f  | (%4.0f,%4.0f) | (%4.0f,%4.0f) |  %6.2f" %
                (i, org_centers[i, 0], org_centers[i, 1],
                centers[i][1], centers[i][0], radius[i]))
    else:
        print('steps == 0: no Hough search performed')

    if save:
        status = write_shifts('hough_centers.txt', (centers, radius))
        if status != 1:
            print('Error in write_shifts')

    return centers, radius
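For reference, the positive-steps branch above implements a fixed coarse-to-fine schedule; a minimal sketch of just the interval bookkeeping (the radii are placeholders and centers_flat itself is elided):

import numpy as np

mean_r = 600  # placeholder: mean radius from the previous pass, in pixels
# (lower offset, upper offset, steps) for the fine and very-fine passes
schedule = [(-32, 32, 16), (-4, 4, 8)]
for lo, hi, steps in schedule:
    inner_radius, outer_radius = mean_r + lo, mean_r + hi
    r_width = (outer_radius - inner_radius) // steps * 2
    radii = np.linspace(inner_radius, outer_radius, steps + 1)
    print(inner_radius, outer_radius, steps, r_width)
    # centers_flat(...) would run here and mean_r would be re-estimated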