Example #1
def closest_matrix_from_V(vpos_mat, spos_mat, c2pos_mat, c3pos_mat, inputfile):
    close_array = np.array([1000])
    v = vpos_mat[1:]
    c2 = c2pos_mat[1:]
    c3 = c3pos_mat[1:]
    s = spos_mat[1:]
    n = len(c2) // len(s)  # integer counts: used as range bounds below
    f = len(s) // len(v)
    for i in range(len(v)):
        a = np.array([float(v[i][0]), float(v[i][1]), float(v[i][2])])
        for su in range(0, f):
            #b=np.array([float(s[su+i*f][0]),float(s[su+i*f][1]),float(s[su+i*f][2])])
            #close_array=np.append(close_array,[u.length(np.subtract(a,b))],axis=0)
            for x in range(0, n):
                b = np.array([
                    float(c2[x + n * su + n * f * i][0]),
                    float(c2[x + n * su + n * f * i][1]),
                    float(c2[x + n * su + n * f * i][2])
                ])
                close_array = np.append(close_array,
                                        [u.length(np.subtract(a, b))],
                                        axis=0)
            b = np.array([
                float(c3[su + i * f][0]),
                float(c3[su + i * f][1]),
                float(c3[su + i * f][2])
            ])
            close_array = np.append(close_array, [u.length(np.subtract(a, b))],
                                    axis=0)
    return close_array
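Note: calling np.append inside nested loops copies the whole array on every pass, which is quadratic in the number of distances. A minimal sketch of the usual alternative, accumulating in a Python list and converting once at the end (illustrative values, not the original data):

import numpy as np

distances = []
for a, b in [(np.zeros(3), np.ones(3)), (np.ones(3), 3 * np.ones(3))]:
    # same role as u.length(np.subtract(a, b)) in the loop above
    distances.append(np.linalg.norm(a - b))
close_array = np.array(distances)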
Example #2
def angle_between(v, s, c):
    normal = np.subtract(s, v)
    inter = np.subtract(c, s)
    theta = np.arccos(
        np.dot(normal, inter) / (u.length(normal) * u.length(inter)))
    theta = np.degrees(theta)
    return theta
Example #3
def angle_between(vec1, vec2):
    x = np.dot(vec1, vec2) / (u.length(vec1) * u.length(vec2))
    # clamp to the arccos domain; rounding can push the ratio past +/-1.0
    if x > 1.0:
        x = 1.0
    elif x < -1.0:
        x = -1.0
    #print u.length(vec1)
    #print u.length(vec2)
    theta = np.arccos(x)
    theta = np.degrees(theta)
    #print theta
    return theta
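The same guard can cover both ends of the arccos domain at once with np.clip; a compact sketch of that variant (not from the original project):

import numpy as np

def angle_between_clipped(vec1, vec2):
    # rounding can push |cos(theta)| slightly past 1.0, so clamp both ends
    x = np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
    return np.degrees(np.arccos(np.clip(x, -1.0, 1.0)))

# angle_between_clipped([1, 0, 0], [0, 1, 0]) -> 90.0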
Example #4
    def collide_guides(self, car):
        # guide vectors:
        vectors = car.guidelines_centered()
        intersects = []

        (outer, inner) = self.getlines()
        outersegments = self._line_segments(outer)
        innersegments = self._line_segments(inner)

        p = []
        for v in vectors:
            vectorpoints = []
            vectorpoints += self._collide_lines(v, outersegments)
            vectorpoints += self._collide_lines(v, innersegments)

            if len(vectorpoints) > 0:
                minlength = 10000000
                point = vectorpoints[0]

                for x in vectorpoints:
                    l = util.length(car.rect.center, x)
                    if (l < minlength):
                        minlength = l
                        point = x

                p += [point]

        return p
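The manual minimum-distance bookkeeping above can be written with min and a key function; a sketch assuming the same two-argument util.length(p, q) distance helper:

if vectorpoints:
    # nearest intersection point to the car's center
    point = min(vectorpoints, key=lambda x: util.length(car.rect.center, x))
    p.append(point)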
Example #5
    def move(self, e):
        pos = np.array([e.x, e.y],dtype=float)
        centred = pos-self.size/2
        new_colour = self.colour.copy()
        if self.selected == 0:
            self.canvas.delete(self.cursors[1])
            pos[1] = max(10,min(pos[1],self.size-10))
            self.move_cursor(1, self.size+10, pos[1])
            val = 1 - (pos[1]-10) / (self.size-20)
            self.refresh(val)

            new_colour[2] = val
        elif self.selected == 1:
            sat = util.length(centred) / self.size * 2

            if sat > 1:
                self.move_cursor(0, *(centred*(1/sat) + self.size/2))
            else:
                self.move_cursor(0, *pos)

            new_colour[0] = (math.atan2(centred[1], centred[0]) / math.pi / 2) % 1
            new_colour[1] = min(sat, 1)
        else:
            return
        self.update_textboxes(new_colour)
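new_colour here appears to be an HSV triple: hue from atan2 on the centred position, saturation from the radius, value from the slider. A sketch of converting such a triple to RGB with the standard library (illustration only; the widget's own conversion is not shown in this snippet):

import colorsys

h, s, v = 0.5, 1.0, 1.0                  # cyan in HSV
r, g, b = colorsys.hsv_to_rgb(h, s, v)   # -> (0.0, 1.0, 1.0)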
Example #6
def position_matrix_from_V(vpos_mat, Aupos_mat, spos_mat, c2pos_mat, c3pos_mat, inputfile):
    close_array = np.array([[100, 100, 100]])
    v = vpos_mat[1:]
    au = Aupos_mat[1:]
    c2 = c2pos_mat[1:]
    c3 = c3pos_mat[1:]
    s = spos_mat[1:]
    thetax = 0
    thetay = 0
    thetaz = 0
    correct = [0, 0, 2]
    n = len(c2) // len(s)  # integer counts: used as range bounds below
    f = len(s) // len(v)
    for i in range(len(v)):
        a = np.array([float(v[i][0]), float(v[i][1]), float(v[i][2])])
        for d in range(i * 122, (i + 1) * 122):
            e = np.array([float(au[d][0]), float(au[d][1]), float(au[d][2])])
            if abs(u.length(np.subtract(a, e)) - 2) < .00001:
                thetaz = rv.angle_difference(correct[1], correct[0], e[1], e[0])
                thetay = rv.angle_difference(correct[2], correct[0], e[2], e[0])
                thetax = rv.angle_difference(correct[2], correct[1], e[2], e[1])
                break  # stop at the first Au atom found at the reference distance
        for su in range(0, f):
            b = np.array([float(s[su + i * f][0]), float(s[su + i * f][1]), float(s[su + i * f][2])])
            close_array = np.append(close_array, [np.subtract(rv.move_point(thetax, thetay, thetaz, b), a)], axis=0)
            for x in range(0, n):
                b = np.array([float(c2[x + n * su + n * f * i][0]), float(c2[x + n * su + n * f * i][1]), float(c2[x + n * su + n * f * i][2])])
                close_array = np.append(close_array, [np.subtract(rv.move_point(thetax, thetay, thetaz, b), a)], axis=0)
            b = np.array([float(c3[su + i * f][0]), float(c3[su + i * f][1]), float(c3[su + i * f][2])])
            close_array = np.append(close_array, [np.subtract(rv.move_point(thetax, thetay, thetaz, b), a)], axis=0)
    return close_array
Example #7
 def check_collisions(self):
     for player in self.players:
         coll = self.track.collide(player.sprite)
         if coll:
             player.kill()
         points = self.track.collide_guides(player.sprite)
         player.sprite.intersectpoints = points
         self.loop_inputs[player] = [
             util.length(player.sprite.rect.center, x) for x in points
         ]
Example #8
def compute_length_array(trkfile=None, streams=None, savefname = 'lengths.npy'):
    if streams is None and trkfile is not None:
        print("Compute length array for fibers in %s" % trkfile)
        streams, hdr = tv.read(trkfile, as_generator = True)
        n_fibers = hdr['n_count']
        if n_fibers == 0:
            msg = "Header field n_count of trackfile %s is set to 0. No track seem to exist in this file." % trkfile
            print(msg)
            raise Exception(msg)
    else:
        n_fibers = len(streams)
        
    leng = np.zeros(n_fibers, dtype=float)  # the np.float alias was removed from modern NumPy
    for i,fib in enumerate(streams):
        leng[i] = length(fib[0])
    
    # store length array
    np.save(savefname, leng)
    print("Store lengths array to: %s" % savefname)
    
    return leng
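The length helper applied to each fiber is not shown; for a streamline it is conventionally the sum of its segment lengths (e.g. dipy's length). A minimal self-contained equivalent, assuming an (N, 3) array of points (sketch only):

import numpy as np

def polyline_length(points):
    # sum of Euclidean distances between consecutive points
    diffs = np.diff(np.asarray(points, dtype=float), axis=0)
    return float(np.sqrt((diffs ** 2).sum(axis=1)).sum())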
Example #9
def compute_length_array(trkfile=None, streams=None, savefname='lengths.npy'):
    if streams is None and trkfile is not None:
        print("Compute length array for fibers in %s" % trkfile)
        streams, hdr = tv.read(trkfile, as_generator=True)
        n_fibers = hdr['n_count']
        if n_fibers == 0:
            msg = "Header field n_count of trackfile %s is set to 0. No track seem to exist in this file." % trkfile
            print(msg)
            raise Exception(msg)
    else:
        n_fibers = len(streams)

    leng = np.zeros(n_fibers, dtype=float)
    for i, fib in enumerate(streams):
        leng[i] = length(fib[0])

    # store length array
    np.save(savefname, leng)
    print("Store lengths array to: %s" % savefname)

    return leng
Example #10
def stft_v2(Buffer, f_n):
    w_n = f_n
    Buff_len = length(Buffer)  # Signal length
    # Frequency axis
    f_n = math.ceil(f_n / 2) * 2 + 1
    Lf = (f_n - 1) / 2
    # Time axis
    w_n = math.ceil(w_n / 2) * 2 + 1
    Lw = (w_n - 1) / 2
    # Initialize Spectrum to zero with appropriate size
    Spec = np.zeros((f_n, Buff_len), dtype=np.complex64)
    ## Sliding window over signal
    for iterr in range(Buff_len):
        i_l = min([iterr, Lw, Lf])
        i_r = min([Buff_len - iterr - 1, Lw, Lf])
        iter_ind = np.arange(-i_l, i_r + 1)
        ind1 = (iter_ind + iterr).astype(
            'int')  # Time Indexing of the original signal
        ind = (iter_ind + Lf).astype('int')  # Frequency Indexing of the matrix
        Spec[ind, iterr] = Buffer[ind1]
    # Computing FFT of stacked Windows
    Spec = np.fft.fft(Spec.T).T
    Spec = Spec * 2 / f_n  # normalizing the FFTs
    return Spec
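In this snippet (and in the LoRa examples below) length is used MATLAB-style: it returns the number of samples, not a geometric length. A one-line stand-in with MATLAB's semantics (an assumption, not the project's actual helper):

import numpy as np

def length(x):
    # MATLAB length(): the size of the largest dimension
    return max(np.asarray(x).shape)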
Example #11
def cmat(intrk,
         roi_volumes,
         parcellation_scheme,
         compute_curvature=True,
         additional_maps={},
         output_types=['gPickle'],
         atlas_info={}):
    """ Create the connection matrix for each resolution using fibers and ROIs. """

    # create the endpoints for each fibers
    en_fname = 'endpoints.npy'
    en_fnamemm = 'endpointsmm.npy'
    #ep_fname  = 'lengths.npy'
    curv_fname = 'meancurvature.npy'
    #intrk = op.join(gconf.get_cmp_fibers(), 'streamline_filtered.trk')
    print('Opening file :' + intrk)
    fib, hdr = nibabel.trackvis.read(intrk, False)

    if parcellation_scheme != "Custom":
        resolutions = get_parcellation(parcellation_scheme)
    else:
        resolutions = atlas_info

    # Previously, load_endpoints_from_trk() used the voxel size stored
    # in the track hdr to transform the endpoints to ROI voxel space.
    # This only works if the ROI voxel size is the same as the DSI/DTI
    # voxel size.  In the case of DTI, it is not.
    # We do, however, assume that all of the ROI images have the same
    # voxel size, so this code just loads the first one to determine
    # what it should be
    firstROIFile = roi_volumes[0]
    firstROI = nibabel.load(firstROIFile)
    roiVoxelSize = firstROI.get_header().get_zooms()
    (endpoints, endpointsmm) = create_endpoints_array(fib, roiVoxelSize, True)
    np.save(en_fname, endpoints)
    np.save(en_fnamemm, endpointsmm)

    # only compute curvature if required
    if compute_curvature:
        meancurv = compute_curvature_array(fib)
        np.save(curv_fname, meancurv)

    print("========================")

    n = len(fib)

    #resolution = gconf.parcellation.keys()

    streamline_wrote = False
    for parkey, parval in resolutions.items():
        #if parval['number_of_regions'] != 83:
        #    continue

        print("Resolution = " + parkey)

        # create empty fiber label array
        fiberlabels = np.zeros((n, 2))
        final_fiberlabels = []
        final_fibers_idx = []

        # Open the corresponding ROI
        print("Open the corresponding ROI")
        for vol in roi_volumes:
            if parkey in vol:
                roi_fname = vol
                print(roi_fname)
        #roi_fname = roi_volumes[r]
        #r += 1
        roi = nibabel.load(roi_fname)
        roiData = roi.get_data()

        # Create the matrix
        nROIs = parval['number_of_regions']
        print("Create the connection matrix (%s rois)" % nROIs)
        G = nx.Graph()

        # add node information from parcellation
        gp = nx.read_graphml(parval['node_information_graphml'])
        for u, d in gp.nodes_iter(data=True):
            G.add_node(int(u), d)
            # compute a position for the node based on the mean position of the
            # ROI in voxel coordinates (segmentation volume )
            G.node[int(u)]['dn_position'] = tuple(
                np.mean(np.where(roiData == int(d["dn_correspondence_id"])),
                        axis=1))

        dis = 0

        # prepare: compute the measures
        t = [c[0] for c in fib]
        h = np.array(t, dtype=object)

        mmap = additional_maps
        mmapdata = {}
        for k, v in mmap.items():
            da = nibabel.load(v)
            mmapdata[k] = (da.get_data(), da.get_header().get_zooms())

        print("Create the connection matrix")
        pc = -1
        for i in range(n):  # n: number of fibers

            # Percent counter
            pcN = int(round(float(100 * i) / n))
            if pcN > pc and pcN % 1 == 0:
                pc = pcN
                print('%4.0f%%' % (pc))

            # ROI start => ROI end
            try:
                startROI = int(roiData[
                    endpoints[i, 0, 0], endpoints[i, 0, 1],
                    endpoints[i, 0,
                              2]])  # endpoints from create_endpoints_array
                endROI = int(roiData[endpoints[i, 1, 0], endpoints[i, 1, 1],
                                     endpoints[i, 1, 2]])
            except IndexError:
                print(
                    "An index error occurred for fiber %s. This means that the fiber start or endpoint is outside the volume. Continuing."
                    % i)
                continue

            # Filter
            if startROI == 0 or endROI == 0:
                dis += 1
                fiberlabels[i, 0] = -1
                continue

            if startROI > nROIs or endROI > nROIs:
                #                print("Start or endpoint of fiber terminate in a voxel which is labeled higher")
                #                print("than is expected by the parcellation node information.")
                #                print("Start ROI: %i, End ROI: %i" % (startROI, endROI))
                #                print("This needs bugfixing!")
                continue

            # Update fiber label
            # switch the rois in order to enforce startROI < endROI
            if endROI < startROI:
                tmp = startROI
                startROI = endROI
                endROI = tmp

            fiberlabels[i, 0] = startROI
            fiberlabels[i, 1] = endROI

            final_fiberlabels.append([startROI, endROI])
            final_fibers_idx.append(i)

            # Add edge to graph
            if G.has_edge(startROI, endROI):
                G.edge[startROI][endROI]['fiblist'].append(i)
            else:
                G.add_edge(startROI, endROI, fiblist=[i])

        print(
            "Found %i (%f percent out of %i fibers) fibers that start or terminate in a voxel which is not labeled. (orphans)"
            % (dis, dis * 100.0 / n, n))
        print("Valid fibers: %i (%f percent)" %
              (n - dis, 100 - dis * 100.0 / n))

        # create a final fiber length array
        finalfiberlength = []
        for idx in final_fibers_idx:
            # compute length of fiber
            finalfiberlength.append(length(fib[idx][0]))

        # convert to array
        final_fiberlength_array = np.array(finalfiberlength)

        # make final fiber labels as array
        final_fiberlabels_array = np.array(final_fiberlabels, dtype=np.int32)

        # update edges
        # measures to add here
        for u, v, d in G.edges_iter(data=True):
            G.remove_edge(u, v)
            di = {
                'number_of_fibers': len(d['fiblist']),
            }

            # additional measures
            # compute mean/std of fiber measure
            idx = np.where((final_fiberlabels_array[:, 0] == int(u))
                           & (final_fiberlabels_array[:, 1] == int(v)))[0]
            di['fiber_length_mean'] = float(
                np.mean(final_fiberlength_array[idx]))
            di['fiber_length_std'] = float(np.std(
                final_fiberlength_array[idx]))

            # this is indexed into the fibers that are valid in the sense of touching start
            # and end roi and not going out of the volume
            idx_valid = np.where((fiberlabels[:, 0] == int(u))
                                 & (fiberlabels[:, 1] == int(v)))[0]
            for k, vv in mmapdata.items():
                val = []
                for i in idx_valid:
                    # retrieve indices
                    try:
                        idx2 = (h[i] / vv[1]).astype(np.uint32)
                        val.append(vv[0][idx2[:, 0], idx2[:, 1], idx2[:, 2]])
                    except IndexError as e:
                        print("Index error occurred when trying to extract scalar values for measure", k)
                        print("--> Discard fiber with index", i, "Exception: ", e)
                        print("----")

                da = np.concatenate(val)
                di[k + '_mean'] = float(da.mean())
                di[k + '_std'] = float(da.std())
                del da
                del val

            G.add_edge(u, v, di)

        # storing network
        if 'gPickle' in output_types:
            nx.write_gpickle(G, 'connectome_%s.gpickle' % parkey)
        if 'mat' in output_types:
            # edges
            size_edges = (parval['number_of_regions'],
                          parval['number_of_regions'])
            edge_keys = G.edges(data=True)[0][2].keys()

            edge_struct = {}
            for edge_key in edge_keys:
                edge_struct[edge_key] = nx.to_numpy_matrix(G, weight=edge_key)

            # nodes
            size_nodes = parval['number_of_regions']
            node_keys = G.nodes(data=True)[0][1].keys()

            node_struct = {}
            for node_key in node_keys:
                if node_key == 'dn_position':
                    node_arr = np.zeros([size_nodes, 3], dtype=float)
                else:
                    node_arr = np.zeros(size_nodes, dtype=np.object_)
                node_n = 0
                for _, node_data in G.nodes(data=True):
                    node_arr[node_n] = node_data[node_key]
                    node_n += 1
                node_struct[node_key] = node_arr

            scipy.io.savemat('connectome_%s.mat' % parkey,
                             mdict={
                                 'sc': edge_struct,
                                 'nodes': node_struct
                             })
        if 'graphml' in output_types:
            g2 = nx.Graph()
            for u_gml, v_gml, d_gml in G.edges_iter(data=True):
                g2.add_edge(u_gml, v_gml, d_gml)
            for u_gml, d_gml in G.nodes(data=True):
                g2.add_node(
                    u_gml, {
                        'dn_correspondence_id': d_gml['dn_correspondence_id'],
                        'dn_fsname': d_gml['dn_fsname'],
                        'dn_hemisphere': d_gml['dn_hemisphere'],
                        'dn_name': d_gml['dn_name'],
                        'dn_position_x': float(d_gml['dn_position'][0]),
                        'dn_position_y': float(d_gml['dn_position'][1]),
                        'dn_position_z': float(d_gml['dn_position'][2]),
                        'dn_region': d_gml['dn_region']
                    })
            nx.write_graphml(g2, 'connectome_%s.graphml' % parkey)

        print("Storing final fiber length array")
        fiberlabels_fname = 'final_fiberslength_%s.npy' % str(parkey)
        np.save(fiberlabels_fname, final_fiberlength_array)

        print("Storing all fiber labels (with orphans)")
        fiberlabels_fname = 'filtered_fiberslabel_%s.npy' % str(parkey)
        np.save(
            fiberlabels_fname,
            np.array(fiberlabels, dtype=np.int32),
        )

        print("Storing final fiber labels (no orphans)")
        fiberlabels_noorphans_fname = 'final_fiberlabels_%s.npy' % str(parkey)
        np.save(fiberlabels_noorphans_fname, final_fiberlabels_array)

        if not streamline_wrote:
            print("Filtering tractography - keeping only no orphan fibers")
            finalfibers_fname = 'streamline_final.trk'
            save_fibers(hdr, fib, finalfibers_fname, final_fibers_idx)
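A hypothetical invocation of cmat, with placeholder file names and parcellation scheme (the real pipeline wires these in from its configuration):

cmat(intrk='streamline_filtered.trk',
     roi_volumes=['ROI_scale33.nii.gz'],
     parcellation_scheme='Lausanne2008',
     output_types=['gPickle', 'mat'])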
Example #12
def active_sess_dechirp(x_1):
    # ACTIVE_SESS_DECHIRP: detect active LoRa transmissions by sliding a
    # window over the signal, dechirping it, and thresholding the FFT peak gain

    SF = param_configs(1)
    BW = param_configs(2)
    Fs = param_configs(3)
    N = int(2**SF)
    upsampling_factor = int(Fs / BW)

    DC = np.conj(sym_to_data_ang([1], N))
    DC_fft = np.fft.fft(DC)
    DC_upsamp = np.fft.ifft(
        np.concatenate([
            DC_fft[:N // 2 + 1],
            np.zeros((upsampling_factor - 1) * N), DC_fft[N // 2 + 1:N]
        ]))

    peak_gain = []
    uplink_wind = []
    n = []
    p = []
    last_wind = 0
    win_jump_factor = 3
    front_buf = 6 * win_jump_factor
    back_buf = 3 * win_jump_factor
    win_jump = math.floor(N * upsampling_factor / win_jump_factor)
    mov_thresh_wind = 1000 * win_jump_factor
    mov_thresh = 0
    mov_thresh_rec = []
    for i in tqdm(range(math.floor(length(x_1) / win_jump) - win_jump_factor)):

        wind = x_1[i * win_jump:i * win_jump + (N * upsampling_factor)]
        wind_fft = np.abs(np.fft.fft(wind * DC_upsamp))  # 17 sec
        wind_fft = wind_fft[np.concatenate([np.arange(N//2, dtype=int), \
                                            np.arange((N//2 + (upsampling_factor-1)*N), (upsampling_factor)*N, dtype=int)])]
        noise_floor = np.mean(wind_fft)  # 4 sec
        n.append(noise_floor)
        fft_peak = wind_fft.max(0)

        p.append(fft_peak)
        peak_gain.append(10 * math.log10(fft_peak / noise_floor))

        if i + 1 > mov_thresh_wind:
            mov_thresh = 1.3 * np.mean(peak_gain[-mov_thresh_wind + 1:])
            if mov_thresh > 6:
                mov_thresh = 6
        else:
            mov_thresh = 1.3 * np.mean(peak_gain)
            if mov_thresh > 6:
                mov_thresh = 6
        mov_thresh_rec.append(mov_thresh)

        if peak_gain[-1] >= mov_thresh:
            if i + 1 > last_wind:
                if i - back_buf < 1:
                    uplink_wind.append([1, i + 1 + front_buf])
                else:
                    uplink_wind.append([i + 1 - back_buf, i + 1 + front_buf])
                last_wind = uplink_wind[-1][1]
            elif i + 1 <= last_wind:
                uplink_wind[-1][1] = i + 1 + front_buf
                last_wind = uplink_wind[-1][1]
    uplink_wind = np.array(uplink_wind)
    uplink_wind = uplink_wind[((uplink_wind[:, 1] - uplink_wind[:, 0]) !=
                               (front_buf + back_buf)).nonzero()[0], :]
    uplink_wind = uplink_wind[((uplink_wind[:, 1] - uplink_wind[:, 0]) !=
                               (front_buf + back_buf + 1)).nonzero()[0], :]
    uplink_wind = uplink_wind[((uplink_wind[:, 1] - uplink_wind[:, 0]) !=
                               (front_buf + back_buf - 1)).nonzero()[0], :]
    temp_link = uplink_wind
    uplink_wind = uplink_wind * win_jump

    if uplink_wind[-1, 1] > length(x_1):
        uplink_wind[-1, 1] = length(x_1)

    return uplink_wind
Example #13
def dnsamp_buff(Data_stack,Upchirp_ind):
    # load Parameters
    SF = param_configs(1)
    BW = param_configs(2)
    Fs = param_configs(3)
    N = int(2**SF)
    num_preamble = param_configs(4)
    num_sync = param_configs(5)
    num_DC = param_configs(6)
    DC = np.conj(sym_to_data_ang([1],N))

    ####################################
    ##  Compute and Correct Frequency Offsets for each Preamble Detected in each Data_stack and Find the Peak Statistics needed for demodulation

    Up_ind = []
    peak_amp = []
    Data_buff = []
    ffo = []
    FFO = []
    # n_pnt is the fft Factor - fft(Signal, n_pnt * length(Signal))
    n_pnt = 16
    peak_stats = []
    # iterate over all Upchirps that satisfied the 8-consecutive-peak condition
    for k in range(Upchirp_ind.shape[0]):
        if(Upchirp_ind[k,0] - N <= 0):
            peak_stats.append([])
            continue
        inn = []
        k_peak_stats = []
        Data_freq_off = []
        # iterate over all downsampled buffers
        for m in range(Data_stack.shape[0]):
            data_wind = []
            data_fft = []
            freq_off = []
            # ind_temp contains the Frequency Bins around bin 1 where a
            # Preamble Peak can lie
            ind_temp = np.concatenate([np.arange(5*n_pnt), np.arange((N*n_pnt)-(4*n_pnt)-1, (N*n_pnt))])
            # iterate over all Preambles
            c = []
            for j in range(num_preamble):
                data_wind = Data_stack[m,int(Upchirp_ind[k,0]) - 1 : int(Upchirp_ind[k,0] + (num_preamble*N) -1)]
                data_fft.append(abs(np.fft.fft(data_wind[((j)*N):((j+1)*N)] * DC[:N],n_pnt*N)))
                
                c.append(data_fft[j][ind_temp].argmax(0))
                c[j] = ind_temp[c[j]] + 1
                # Handle -ve and +ve Frequency Offsets Accordingly
                if(c[j] > (n_pnt*N)/2):
                    freq_off.append(( (N*n_pnt) - c[j] ) / n_pnt)
                else:
                    freq_off.append(-1*( c[j] - 1 ) / n_pnt)
            # average the frequency offset of 6 middle Preambles
            freq_off = np.sum( freq_off[1:7] ) / (num_preamble - 2)
            ffo.append(freq_off)
            # Correct for the Frequency Offset in corresponding Data_Stack
            Data_freq_off.append(Data_stack[m,:] * np.exp( (1j*2*math.pi*(freq_off / N)) * np.arange(1, length(Data_stack[m,:]) + 1) ))
            # ind_temp contains the Frequency Bins around bin 1 where a
            # Preamble Peak can lie, assumption (-5*BW/2^SF <= Freq_off <= 5*BW/2^SF)
            ind_temp = np.concatenate([range(5), range(N-4, N)])
            a = []
            c = []
            data_wind = []
            data_fft = []
            # for the frequency offset corrected Data Stack, find FFT of Preamble to get Peak Statistics 
            for j in range(num_preamble):
                data_wind = Data_freq_off[m][int(Upchirp_ind[k,0]) - 1 : int(Upchirp_ind[k,0] + (num_preamble*N)) - 1]
                data_fft.append(abs(np.fft.fft(data_wind[(j)*N : (j+1)*N] * DC[:N],N)))
                [aj,cj] = data_fft[j][ind_temp].max(0), data_fft[j][ind_temp].argmax(0)
                a.append(aj); c.append(cj)

                c[j] = ind_temp[c[j]]
            k_peak_stats.append([np.mean(a), np.var(a, ddof=1), np.std(a, ddof=1)])
            
            ##  Find the Right Data_stack to work with
            # first find the stft of given stack at the Preamble Region,
            # Spec is a 2D Matrix, rows - Freq. Bins & Col. - Time Samples
            Spec = stft_v1(Data_freq_off[m][int(Upchirp_ind[k,0] - N)-1:int(Upchirp_ind[k,-1] + N - 1 - N)],N,DC[:N],0,0)
            temp = []
            freq_track_qual = []
            pream_peak_ind = []
            adj_ind = []
            # row_ind contains the Frequency Rows around bin 1 where a
            # Preamble Peak can lie
            row_ind = np.concatenate([range(N-6,N), range(0,6)])
            count = 1
            for i in np.nditer(row_ind):
                temp.append(np.sum(np.abs(Spec[i,:])))
                count = count + 1
            temp = np.array(temp)
            # Frequency Track in row containing Preamble should have
            # maximum energy
            ind = temp.argmax(0)
            pream_peak_ind = row_ind[ind]
            # Find row indices for Preamble row + 1 & - 1
            adj_ind = np.array([np.mod(pream_peak_ind-1+1,N), np.mod(pream_peak_ind+1+1,N)]) # plus 1 for index conversion
            if(np.sum(adj_ind == 0) == 1):
                adj_ind[(adj_ind == 0).nonzero()] = N
            # A good quality frequency track for a preamble is one that has
            # least energy leakage in adjacent rows (this promises very sharp FFT peaks)
            adj_ind -= 1 # subtract 1 to convert back to Python indices
            freq_track_qual = ( np.sum(np.abs(Spec[pream_peak_ind,:])) - np.sum(np.abs(Spec[adj_ind[0],:])) ) + ( np.sum(np.abs(Spec[pream_peak_ind,:])) - np.sum(np.abs(Spec[adj_ind[1],:])) )
            inn.append(freq_track_qual)
        inn = np.array(inn)
        peak_stats.append(k_peak_stats)
        Data_freq_off = np.array(Data_freq_off)
        # choosing the best Data_stack based on maximum energy difference from
        # adjacent bins
        b = inn.argmax(0)
        # output frequency offset corrected buffer with relevant, Peak
        # statistics and frequency offsets
        Data_buff.append(Data_freq_off[b,:])
        FFO.append(ffo[b])
        peak_amp.append(peak_stats[k][b])
        Up_ind.append(Upchirp_ind[k,:])
    Data_buff = np.array(Data_buff)
    Up_ind = np.array(Up_ind)
    peak_amp = np.array(peak_amp)
    return [Data_buff, peak_amp, Up_ind, FFO]
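The n_pnt factor above zero-pads each FFT (np.fft.fft(x, n_pnt * N)), which evaluates the spectrum on a grid n_pnt times finer and sharpens the peak location. A minimal NumPy demonstration of the effect (illustrative values only):

import numpy as np

N, n_pnt = 8, 16
x = np.exp(2j * np.pi * 1.25 * np.arange(N) / N)    # tone between bins 1 and 2
coarse = np.abs(np.fft.fft(x, N)).argmax()           # nearest coarse bin: 1
fine = np.abs(np.fft.fft(x, n_pnt * N)).argmax()     # refined bin: 20, i.e. 20/16 = 1.25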
Example #14
File: scene.py Project: yychiang/xy
 def visible(self, eye, point):
     # cast a ray from the point toward the eye; the point is visible
     # only if nothing intersects that ray before it reaches the eye
     v = util.sub(eye, point)
     o = point
     d = util.normalize(v)
     t = self.intersect(o, d, 0, util.length(v))
     return t is None
Example #15
def draw_shaded(loops, base_colour, rim_colour, distance):
    gl.glDepthFunc(gl.GL_ALWAYS)
    gl.glColor3fv(base_colour)
    gl.glBegin(gl.GL_TRIANGLES)
    for tri in util.triangulate(loops):
        for point in tri:
            gl.glVertex3f(*point, -1)
    gl.glEnd()

    gl.glDepthFunc(gl.GL_LEQUAL)
    gl.glBegin(gl.GL_TRIANGLES)
    for loop in loops:
        for a, b, c in zip(np.roll(loop, -1, 0), loop, np.roll(loop, 1, 0)):
            normal = np.array([b[1] - a[1], a[0] - b[0]])
            l = util.length(normal)
            if l == 0:
                continue
            normal /= l

            inset_a = a - normal * distance
            inset_b = b - normal * distance

            gl.glColor3fv(rim_colour)
            gl.glVertex3f(*a, 0)
            gl.glVertex3f(*b, 0)
            gl.glColor3fv(base_colour)
            gl.glVertex3f(*inset_a, -1)

            gl.glVertex3f(*inset_a, -1)
            gl.glVertex3f(*inset_b, -1)
            gl.glColor3fv(rim_colour)
            gl.glVertex3f(*b, 0)
    gl.glEnd()

    for loop in loops:
        for a, b, c in zip(np.roll(loop, -1, 0), loop, np.roll(loop, 1, 0)):
            if util.is_convex(a, b, c):
                continue
            normals = [
                np.array([b[1] - a[1], a[0] - b[0]]),
                np.array([c[1] - b[1], b[0] - c[0]]),
            ]
            normals = [norm / util.length(norm) for norm in normals]

            for _ in range(2):
                new_normals = []
                for normA, normB in zip(normals, normals[1:]):
                    new = normA + normB
                    new /= util.length(new)
                    new_normals.append(new)

                for i, norm in enumerate(new_normals):
                    normals.insert(1 + 2 * i, norm)

            gl.glBegin(gl.GL_TRIANGLE_FAN)
            gl.glColor3fv(rim_colour)
            gl.glVertex3f(*b, 0)

            gl.glColor3fv(base_colour)

            for norm in normals:
                point = b - norm * distance
                gl.glVertex3f(*point, -1)

            gl.glEnd()

    gl.glColor3f(0, 0, 0)
    for loop in loops:
        gl.glBegin(gl.GL_LINE_LOOP)
        for point in loop:
            gl.glVertex3f(*point, 1)
        gl.glEnd()
Example #16
        # Find the vertices of the bounding box from the lines
        v = (u.intersection(le[0], se[0]), 
            u.intersection(le[0], se[1]), 
            u.intersection(le[1], se[0]), 
            u.intersection(le[1], se[1]))

        # Show the raw vertices without epsilon
        if SHOW:
            plt.scatter(np.array(v)[:,0], np.array(v)[:,1])

        # Calculate the diagonal lines of the bounding box
        diags = (u.line(v[0], v[3]), u.line(v[1], v[2]))

        # Calculate the center, width, height, and angle of the bounding box
        c = u.intersection(diags[0], diags[1])      # Find the center by looking at the intersection
        w = u.length(v[0], v[1]) + EPSILON          # Find the width
        h = u.length(v[0], v[2]) + EPSILON          # Find the height
        a = u.angle(v[3], v[2])                     # Find the angle

        wBucket = u.bucketCount(wBucket, w, 10)
        hBucket = u.bucketCount(hBucket, h, 10)
        aBucket = u.bucketCount(aBucket, a, 10)

        if SHOW:
            plt.scatter(c[0], c[1])
            plt.annotate(a, (c[0], c[1]))

        line = str(c[0]) + " " + str(c[1]) + " " + str(w) + " " + str(h) + " " + str(label) + " " + str(a) + "\n"
        rboxLines.append(line)

        if PRINT:
Example #17
if os.path.exists(symbols_ground_truth_path):
    sym = np.loadtxt(symbols_ground_truth_path)
    calculate_SER = True
else:
    calculate_SER = False

# Generating a Downchirp
DC = np.conj(sym_to_data_ang([1],N))

# parse complex data
x_1 = np.fromfile(in_file_path, dtype=np.complex64)

# file duration
t = np.arange(len(x_1)) / Fs

x_1 = x_1[:int(math.floor(length(x_1) / upsampling_factor) * upsampling_factor)]

x_1_dnsamp = x_1[::int(upsampling_factor)]        
file_dur = length(x_1) / Fs

print('Active session dechirping:')
uplink_wind = active_sess_dechirp(x_1) - 1 # subtract 1 for indexing
print('Detected ', len(uplink_wind), ' active sessions')

demod_sym_stack = []
Peaks = []
for m in range(uplink_wind.shape[0]):
    print('Packet ' + str(m+1) + '/' + str(uplink_wind.shape[0]))
    # DC correlations to find LoRa pkts out of collision
    temp_buff = []
    temp_buff = x_1[int(uplink_wind[m, 0]) : int(uplink_wind[m, 1]) + 1]
Example #18
def four_body_func(vecs, t, *args):

    pos_1 = vecs[0:2]
    pos_2 = vecs[2:4]
    pos_3 = vecs[4:6]
    pos_4 = vecs[6:8]
    vel_1 = vecs[8:10]
    vel_2 = vecs[10:12]
    vel_3 = vecs[12:14]
    vel_4 = vecs[14:16]

    acc_1_2 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_2, pos_1)), [args[1]] ),
                                     math.pow(u.length(u.position_sub(pos_2, pos_1)), 3))
    acc_2_1 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_1, pos_2)), [args[0]] ),
                                     math.pow(u.length(u.position_sub(pos_2, pos_1)), 3))
    acc_3_1 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_1, pos_3)), [args[0]] ),
                                     math.pow(u.length(u.position_sub(pos_3, pos_1)), 3))
    acc_1_3 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_3, pos_1)), [args[2]] ),
                                     math.pow(u.length(u.position_sub(pos_3, pos_1)), 3))
    acc_4_1 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_1, pos_4)), [args[0]] ),
                                     math.pow(u.length(u.position_sub(pos_4, pos_1)), 3))
    acc_1_4 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_4, pos_1)), [args[3]] ),
                                     math.pow(u.length(u.position_sub(pos_4, pos_1)), 3))
    acc_3_2 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_2, pos_3)), [args[1]] ),
                                     math.pow(u.length(u.position_sub(pos_3, pos_2)), 3))
    acc_2_3 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_3, pos_2)), [args[2]] ),
                                     math.pow(u.length(u.position_sub(pos_3, pos_2)), 3))
    acc_4_2 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_2, pos_4)), [args[1]] ),
                                     math.pow(u.length(u.position_sub(pos_4, pos_2)), 3))
    acc_2_4 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_4, pos_2)), [args[3]] ),
                                     math.pow(u.length(u.position_sub(pos_4, pos_2)), 3))
    acc_3_4 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_4, pos_3)), [args[3]] ),
                                     math.pow(u.length(u.position_sub(pos_4, pos_3)), 3))
    # acceleration of body 4 due to body 3 is scaled by body 3's mass, args[2]
    acc_4_3 = u.position_div_scalars(u.position_mult_scalars( (u.position_sub(pos_3, pos_4)), [args[2]] ),
                                     math.pow(u.length(u.position_sub(pos_4, pos_3)), 3))

    acc_1 = u.sum_vectors([acc_1_2, acc_1_3, acc_1_4])
    acc_2 = u.sum_vectors([acc_2_1, acc_2_3, acc_2_4])
    acc_3 = u.sum_vectors([acc_3_1, acc_3_2, acc_3_4])
    acc_4 = u.sum_vectors([acc_4_1, acc_4_2, acc_4_3])

    result = np.array(u.flatten([vel_1.tolist(), vel_2.tolist(), vel_3.tolist(), vel_4.tolist(),
                                  acc_1, acc_2, acc_3, acc_4]))
    #print "Result of func: ", result
    return result
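The (vecs, t, *args) signature matches the callback convention of scipy.integrate.odeint; a hypothetical call, assuming the four extra args are the (G-scaled) masses of the bodies:

import numpy as np
from scipy.integrate import odeint

t = np.linspace(0.0, 10.0, 1000)
# 4 positions then 4 velocities, 2D each; bodies start at distinct corners
y0 = np.array([0, 0, 1, 0, 0, 1, 1, 1,  0, 0, 0, 0, 0, 0, 0, 0], dtype=float)
masses = (1.0, 1.0, 1.0, 1.0)            # placeholder Gm values
trajectory = odeint(four_body_func, y0, t, args=masses)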
Example #19

        chosen_marker = None

        for marker in sorted_markers:

            next_footprint_tf     = marker[1].drift_cor_data[0]
            previous_footprint_tf = self.current_map_to_footprint_tf
            
            # first marker caught ever
            if previous_footprint_tf is None:
                previous_footprint_tf = next_footprint_tf

            change_tf = np.dot(next_footprint_tf, tfmath.inverse_matrix(previous_footprint_tf))
            change_tf_tran, change_tf_rot = util.tran_and_euler_from_matrix(change_tf)
            time_diff = (rospy.Time.now() - self.last_update_timestamp).to_sec()

            distance   = util.length(change_tf_tran)
            angle_diff = change_tf_rot[2]

            rospy.loginfo("Looking at marker %s", marker[0])
            rospy.loginfo("\t{0:.3f} m, {1:.3f} radians".format(distance, angle_diff))

            '''
            lin_velocity = distance / time_diff
            ang_velocity = angle_diff / time_diff

            rospy.loginfo("\t{0:.3f} > {1:.3f}, diff: {2:.3f}".format(lin_velocity, self.max_lin_vel, lin_velocity-self.max_lin_vel))
            rospy.loginfo("\t{0:.3f} > {1:.3f}, diff: {2:.3f}".format(ang_velocity, self.max_ang_vel, ang_velocity-self.max_ang_vel))
            '''

            if time_diff < self.test_time_diff:
                rospy.loginfo("Time check less than test time, checking vels")
Example #20
def angle(v1, v2, v3, inputfile):
    vector1 = u.part_vector(v2, v1, inputfile)
    vector2 = u.part_vector(v2, v3, inputfile)
    angle = np.arccos(
        np.dot(vector1, vector2) / (u.length(vector1) * u.length(vector2)))
    return np.degrees(angle)
Example #21
def CIC_Demod(Pream_ind,Rx_Buffer,Pream_ind_stack,Peak_amp,m):
    #DEMOD
    # CIC Demodulation
    # chirp variables
    SF = param_configs(1)
    N = int(2**SF)

    # LORA pkt variables
    num_preamble = param_configs(4)
    num_sync = param_configs(5)
    num_DC = param_configs(6)
    num_data_sym = param_configs(7)

    DC = np.conj(sym_to_data_ang([1], N))

    ###########################################################################
    Pream_ind_stack = np.append(Pream_ind_stack, Pream_ind_stack[:,num_preamble-1] + N)
    ###########################################################################
    
    # for each Preamble in the Pream_ind_stack, compute exact start and end
    # indices for all the data symbols
    frm_ind = []
    for i in range(Pream_ind_stack.shape[0]):
        frm_st = Pream_ind_stack[i] + (num_preamble*N) + (num_DC*N) + (num_sync*N)
        frm_ind.append([np.arange(frm_st, frm_st+((num_data_sym-1)*N)+1, N), \
                        np.arange(frm_st+N-1, frm_st+((num_data_sym)*N)+1, N)])
    frm_ind = np.array(frm_ind) - 1 # subtract 1 for index conversion

    # for the pkt to be demodulated, find indices for each data symbol
    data_ind = []
    en_arr = []
    sym_peak = []
    Data_frame_start = Pream_ind[0] + (num_preamble*N) + (num_DC*N) + (num_sync*N)
    Data_frame_end = Data_frame_start + (num_data_sym*N)
    frame_indices = np.array([(np.arange(Data_frame_start, Data_frame_start+((num_data_sym-1)*N)+1, N)),
                                    (np.arange(Data_frame_start+N-1, Data_frame_start+((num_data_sym)*N)+1, N))]).T.astype('int') - 1 # subtract 1 for zero indexing
    
    data_ind = []
    sym_peak = []
    symbols = []
    for k in range(num_data_sym):
        ## Find interfering Symbol Boundaries
        # for current demodulation window, find the chunks of interfering
        # symbols due to collisions by determining index overlaps
        ind = []
        sym_bnd = []
        for i in range(len(frm_ind)):
            if i != m:
                st = frm_ind[i][0]
                ed = frm_ind[i][1]
                newst = st[np.intersect1d((st > frame_indices[k,0]).nonzero() , (st < frame_indices[k,1]).nonzero())]
                newed = ed[np.intersect1d((ed > frame_indices[k,0]).nonzero() , (ed < frame_indices[k,1]).nonzero())]
                if len(newst) != 0:
                    sym_bnd.append(newst)
                if len(newed) != 0:
                    sym_bnd.append(newed)
        sym_bnd = np.array(sym_bnd)
        ## CIC Filtering
        if frame_indices[k, 1] >= len(Rx_Buffer):
            symbols.append(-3)
            continue
        # standard LoRa dechirping
        data_wind = Rx_Buffer[frame_indices[k,0]:frame_indices[k,1]+1] * DC
        data_fft = np.abs(np.fft.fft(data_wind))

        # scale the demodulation window with an appropriate Gaussian to
        # suppress the interfering symbols at the window ends
        sigma = 1
        WinFun = np.exp(-(1/(2*(sigma**2)))* np.linspace(-1,1,N) ** 2)
        WinFun = WinFun / (math.sqrt(2*math.pi)*sigma)
        temp_wind = data_wind * WinFun
        sym_bnd = np.mod(sym_bnd - frame_indices[k,0],N)
        intf_wind = []
        # n_pnt is the fft Factor - fft(Signal, n_pnt * length(Signal))
        n_pnt = 4
        for i in range(len(sym_bnd)):
            buff = np.zeros((2,n_pnt*N), dtype=np.complex64)
            sym_bnd_i = int(sym_bnd[np.unravel_index(i, sym_bnd.shape, 'F')])
            buff[0,:sym_bnd_i - 1] = temp_wind[:sym_bnd_i - 1]
            buff[0,:] = np.abs(np.fft.fft(buff[0,:], n_pnt*N)) / np.sqrt(np.sum(np.abs(buff[0,:]) ** 2)) # div OKAY
            buff[1,sym_bnd_i-1:N] = temp_wind[sym_bnd_i-1:N]
            buff[1,:] = np.abs(np.fft.fft(buff[1,:],n_pnt*N)) / np.sqrt(np.sum(np.abs(buff[1,:]) ** 2))
            intf_wind = buff
        # CIC's min operation to suppress interfering symbol's Peaks and
        # then finding candidate symbols
        intf_wind = np.array(intf_wind)
        intf_wind_min_fft = np.amin(intf_wind,axis=0) if len(intf_wind) != 0 else []
        pot_sym_cic = get_max(intf_wind_min_fft,4*np.sum(intf_wind_min_fft)/(n_pnt*N),n_pnt*N)
        pot_sym_cic = np.ceil(pot_sym_cic/n_pnt)
        # Out of all peaks, find the ones within +/- 0.5*Preamble Peak
        PwrFctr = 0.5
        PwrFlr = 4 # may try 3
        up_thresh = (Peak_amp[0] + PwrFctr*Peak_amp[0])
        low_thresh = (Peak_amp[0] - PwrFctr*Peak_amp[0])
        if(low_thresh < (PwrFlr*np.sum(data_fft)/N)): #1
            low_thresh = (PwrFlr*np.sum(data_fft)/N)
        pot_sym_pf = get_bounded_max(data_fft,up_thresh,low_thresh)
        ## Filtering Preamble of interfering Packets
        # Filter out peaks within the current window that appear repeatedly
        # in 3 consecutive windows
        if not (frame_indices[k,1] + N > len(Rx_Buffer) or frame_indices[k,1] + 2*N > len(Rx_Buffer)):
            data_wind_next_1 = Rx_Buffer[frame_indices[k,0] + N : frame_indices[k,1] + N+1] * DC
            data_wind_prev_1 = Rx_Buffer[frame_indices[k,0] - N : frame_indices[k,1] - N+1] * DC
            data_wind_next_2 = Rx_Buffer[frame_indices[k,0] + 2*N : frame_indices[k,1] + 2*N+1] * DC
            data_wind_prev_2 = Rx_Buffer[frame_indices[k,0] - 2*N : frame_indices[k,1] - 2*N+1] * DC
            temp_next_1 = np.abs(np.fft.fft(data_wind_next_1,N))
            temp_prev_1 = np.abs(np.fft.fft(data_wind_prev_1,N))
            temp_next_2 = np.abs(np.fft.fft(data_wind_next_2,N))
            temp_prev_2 = np.abs(np.fft.fft(data_wind_prev_2,N))    
            next_wind_sym_1 = get_bounded_max(temp_next_1,4*np.sum(temp_next_1)/N,N) # TODO changed these four lines
            next_wind_sym_2 = get_bounded_max(temp_next_2,4*np.sum(temp_next_2)/N,N)
            prev_wind_sym_1 = get_bounded_max(temp_prev_1,4*np.sum(temp_prev_1)/N,N)
            prev_wind_sym_2 = get_bounded_max(temp_prev_2,4*np.sum(temp_prev_2)/N,N)

            temp = []
            for i in range(length(pot_sym_pf)):
                if( (np.sum(pot_sym_pf[i] == prev_wind_sym_1) and np.sum(pot_sym_pf[i] == next_wind_sym_1))\
                        or (np.sum(pot_sym_pf[i] == prev_wind_sym_2) and np.sum(pot_sym_pf[i] == prev_wind_sym_1))\
                        or (np.sum(pot_sym_pf[i] == next_wind_sym_1) and np.sum(pot_sym_pf[i] == next_wind_sym_2)) ):
                    pass                
                else:
                    temp.append(pot_sym_pf[i])
            pot_sym_pf = np.array(temp) + 1 # convert from indices to data
        ##  Freq. Offset Filtering
        # since we have removed Frequency Offset from pkt under consideration 'Pream_ind'
        # and chosen the right downsampled buffer, the true symbol peak should
        # be the most crisp one (either use this or Choir Module(next), results should be almost similar)
        # and the interfering symbols peak may or may not be crisp
        temp = []
        for i in range(length(pot_sym_pf)):
            if sum(pot_sym_pf[i] + 1 == pot_sym_pf) or sum(pot_sym_pf[i] - 1 == pot_sym_pf): #TODO what is going on here???
                pass
            else:
                temp.append(pot_sym_pf[i])
        pot_sym = np.array(temp)
        ##  Choir Module
        # npnt = 16
        # data_fft_npnt = abs(np.fft.fft(data_wind,npnt*N))
        # FO_thresh = 0.25
        # sym_FO = []
        # temp = []
        # for i in range(length(pot_sym_pf)):
        #     ind = []
        #     if(pot_sym_pf[i] == 1):
        #         ind = np.concatenate([np.arange((N*npnt) - (npnt/2) + 1, (N*npnt)), np.arange((((pot_sym_pf[i]-1) * npnt) + 1) + (npnt/2), N*npnt)])
        #     else:
        #         ind = np.arange((((pot_sym_pf[i]-1) * npnt) + 1) - (npnt/2), (((pot_sym_pf[i]-1) * npnt) + 1) + (npnt/2) + 1)
        #     # ind OKAY
        #     ind = ind.astype('int') - 1 # convert back from data to indices
        #     a = np.argmax(data_fft_npnt[ind])
        #     sym_FO.append(abs(a - ((npnt/2)+1))/npnt)
        #     if(sym_FO[-1] < FO_thresh):
        #         temp.append(pot_sym_pf[i])
        # pot_sym = np.array(temp)
        #################################################################
        ## Make the final decision
        b = []
        if(length(sym_bnd) == 0):
            # if there is no symbol colliding with current demod window
            if(len(pot_sym) == 0):
                symbols.append(np.argmax(data_fft) + 1)
            else:
                # choose peak closest in height to Preamble Peak
                dist = abs(data_fft[pot_sym - 1] - (up_thresh + low_thresh)/2)
                b = np.argmin(dist)
                symbols.append(pot_sym[b])
        else:
            # if symbols are colliding with current demod window
            fin_sym = (np.intersect1d(pot_sym_cic,pot_sym)).astype('int')
            ##  Final Decision
            if(length(fin_sym) == 0):
                if length(pot_sym_cic) == 0 and length(pot_sym) != 0:
                    # choose peak closest in height to Preamble Peak
                    dist = abs(data_fft[pot_sym - 1] - (up_thresh + low_thresh)/2)
                    b = np.argmin(dist)
                    symbols.append(pot_sym[b])
                    
                elif length(pot_sym) == 0 and length(pot_sym_cic) != 0:
                    # make decision based on CIC's windows, correct symbol should
                    # have lowest std as it appears in all windows
                    sdev = np.std(intf_wind[:,(n_pnt * pot_sym_cic).astype('int')])
                    b = np.argmin(sdev)
                    symbols.append(pot_sym_cic[b])
                    
                elif length(pot_sym) == 0 and length(pot_sym_cic) == 0:
                    symbols.append(np.argmax(data_fft) + 1) # add one to account for zero-indexing
                else:
                    # choose peak closest in height to Preamble Peak
                    dist = abs(data_fft[pot_sym - 1] - (up_thresh + low_thresh)/2)
                    b = np.argmin(dist)
                    symbols.append(pot_sym[b])
                    
            else:
                # if intersection yields some candidates then decide based on partial STFT as following
                
                ##  Stft
                # find the STFT as a 2D matrix with the following dimensions:
                # N frequencies (rows)  x  [1 : avg_pnts      N - avg_pnts : N] (columns)
                # i.e. the spectrum of the first 10 and last 10 time samples (no need to compute the whole spectrum)
                avg_pnts = 10 # number of start and end time samples to average over
                G_wind1 = data_wind
                Spec = np.zeros((N, 2*(avg_pnts+1)), dtype=np.complex64)
                for i in range(avg_pnts+1):
                    Spec[:int(N//2) + i,i] = G_wind1[:int(N//2) + i]
                    Spec[int(N//2) - (avg_pnts - i)-1:N,i+1+avg_pnts] = G_wind1[int(N//2) - (avg_pnts - i)-1:N]
                Spec = np.fft.fft(Spec, axis=0)
                # the amplitude difference between the start and end of the spectrum
                # should be minimal for the correct symbol (a non-interfering symbol's
                # frequency track appears continuously across all the columns)
                spec_slice1 = np.abs(Spec[fin_sym-1,:(avg_pnts+1)]) # TODO why minus 2??
                freq_amp = np.amin(spec_slice1,axis=1) if len(spec_slice1) != 0 else []
                spec_slice2 = np.abs(Spec[fin_sym-1,-avg_pnts-1:])
                freq_amp_end = np.amin(spec_slice2,axis=1) if len(spec_slice2) != 0 else []
                dif = np.abs(freq_amp - freq_amp_end)
                b = np.argmin(dif)
                symbols.append(fin_sym[b])
                
    return symbols
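get_max and get_bounded_max are project helpers that are not shown here; from their call sites they return the indices of FFT bins whose magnitude passes a threshold. A hypothetical stand-in for the bounded variant, offered purely as an assumption for readability:

import numpy as np

def get_bounded_max(spectrum, up_thresh, low_thresh):
    # indices of bins whose magnitude lies within [low_thresh, up_thresh]
    return np.nonzero((spectrum >= low_thresh) & (spectrum <= up_thresh))[0]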
Example #22
def cmat(intrk, roi_volumes, parcellation_scheme, compute_curvature=True, additional_maps={}, output_types=['gPickle'], atlas_info = {}): 
    """ Create the connection matrix for each resolution using fibers and ROIs. """
              
    # create the endpoints for each fibers
    en_fname  = 'endpoints.npy'
    en_fnamemm  = 'endpointsmm.npy'
    #ep_fname  = 'lengths.npy'
    curv_fname  = 'meancurvature.npy'
    #intrk = op.join(gconf.get_cmp_fibers(), 'streamline_filtered.trk')
    print('Opening file :' + intrk)
    fib, hdr    = nibabel.trackvis.read(intrk, False)
    
    if parcellation_scheme != "Custom":
        resolutions = get_parcellation(parcellation_scheme)
    else:
        resolutions = atlas_info
    
    # Previously, load_endpoints_from_trk() used the voxel size stored
    # in the track hdr to transform the endpoints to ROI voxel space.
    # This only works if the ROI voxel size is the same as the DSI/DTI
    # voxel size.  In the case of DTI, it is not.  
    # We do, however, assume that all of the ROI images have the same
    # voxel size, so this code just loads the first one to determine
    # what it should be
    firstROIFile = roi_volumes[0]
    firstROI = nibabel.load(firstROIFile)
    roiVoxelSize = firstROI.get_header().get_zooms()
    (endpoints,endpointsmm) = create_endpoints_array(fib, roiVoxelSize, True)
    np.save(en_fname, endpoints)
    np.save(en_fnamemm, endpointsmm)

    # only compute curvature if required
    if compute_curvature:
        meancurv = compute_curvature_array(fib)
        np.save(curv_fname, meancurv)
    
    print("========================")
    
    n = len(fib)
    
    #resolution = gconf.parcellation.keys()

    streamline_wrote = False
    for parkey, parval in resolutions.items():
        #if parval['number_of_regions'] != 83:
        #    continue
            
        print("Resolution = "+parkey)
        
        # create empty fiber label array
        fiberlabels = np.zeros( (n, 2) )
        final_fiberlabels = []
        final_fibers_idx = []
        
        # Open the corresponding ROI
        print("Open the corresponding ROI")
        for vol in roi_volumes:
            if parkey in vol:
                roi_fname = vol
                print(roi_fname)
        #roi_fname = roi_volumes[r]
        #r += 1
        roi       = nibabel.load(roi_fname)
        roiData   = roi.get_data()
      
        # Create the matrix
        nROIs = parval['number_of_regions']
        print("Create the connection matrix (%s rois)" % nROIs)
        G     = nx.Graph()

        # add node information from parcellation
        gp = nx.read_graphml(parval['node_information_graphml'])
        for u,d in gp.nodes_iter(data=True):
            G.add_node(int(u), d)
            # compute a position for the node based on the mean position of the
            # ROI in voxel coordinates (segmentation volume )
            G.node[int(u)]['dn_position'] = tuple(np.mean( np.where(roiData== int(d["dn_correspondence_id"]) ) , axis = 1))

        dis = 0

        # prepare: compute the measures
        t = [c[0] for c in fib]
        h = np.array(t, dtype=object)
        
        mmap = additional_maps
        mmapdata = {}
        for k,v in mmap.items():
            da = nibabel.load(v)
            mmapdata[k] = (da.get_data(), da.get_header().get_zooms() )
        
        
        print("Create the connection matrix")
        pc = -1
        for i in range(n):  # n: number of fibers

            # Percent counter
            pcN = int(round( float(100*i)/n ))
            if pcN > pc and pcN%1 == 0:
                pc = pcN
                print('%4.0f%%' % (pc))
    
            # ROI start => ROI end
            try:
                startROI = int(roiData[endpoints[i, 0, 0], endpoints[i, 0, 1], endpoints[i, 0, 2]]) # endpoints from create_endpoints_array
                endROI   = int(roiData[endpoints[i, 1, 0], endpoints[i, 1, 1], endpoints[i, 1, 2]])
            except IndexError:
                print("An index error occured for fiber %s. This means that the fiber start or endpoint is outside the volume. Continue." % i)
                continue
            
            # Filter
            if startROI == 0 or endROI == 0:
                dis += 1
                fiberlabels[i,0] = -1
                continue
            
            if startROI > nROIs or endROI > nROIs:
#                print("Start or endpoint of fiber terminate in a voxel which is labeled higher")
#                print("than is expected by the parcellation node information.")
#                print("Start ROI: %i, End ROI: %i" % (startROI, endROI))
#                print("This needs bugfixing!")
                continue
            
            # Update fiber label
            # switch the rois in order to enforce startROI < endROI
            if endROI < startROI:
                tmp = startROI
                startROI = endROI
                endROI = tmp

            fiberlabels[i,0] = startROI
            fiberlabels[i,1] = endROI

            final_fiberlabels.append( [ startROI, endROI ] )
            final_fibers_idx.append(i)

            # Add edge to graph
            if G.has_edge(startROI, endROI):
                G.edge[startROI][endROI]['fiblist'].append(i)
            else:
                G.add_edge(startROI, endROI, fiblist   = [i])
                
        print("Found %i (%f percent out of %i fibers) fibers that start or terminate in a voxel which is not labeled. (orphans)" % (dis, dis*100.0/n, n) )
        print("Valid fibers: %i (%f percent)" % (n-dis, 100 - dis*100.0/n) )

        # create a final fiber length array
        finalfiberlength = []
        for idx in final_fibers_idx:
            # compute length of fiber
            finalfiberlength.append( length(fib[idx][0]) )

        # convert to array
        final_fiberlength_array = np.array( finalfiberlength )
        
        # make final fiber labels as array
        final_fiberlabels_array = np.array(final_fiberlabels, dtype = np.int32)

        # update edges
        # measures to add here
        for u,v,d in G.edges_iter(data=True):
            G.remove_edge(u,v)
            di = { 'number_of_fibers' : len(d['fiblist']), }
            
            # additional measures
            # compute mean/std of fiber measure
            idx = np.where( (final_fiberlabels_array[:,0] == int(u)) & (final_fiberlabels_array[:,1] == int(v)) )[0]
            di['fiber_length_mean'] = float( np.mean(final_fiberlength_array[idx]) )
            di['fiber_length_std'] = float( np.std(final_fiberlength_array[idx]) )

            # this is indexed into the fibers that are valid in the sense of touching start
            # and end roi and not going out of the volume
            idx_valid = np.where( (fiberlabels[:,0] == int(u)) & (fiberlabels[:,1] == int(v)) )[0]
            for k,vv in mmapdata.items():
                val = []
                for i in idx_valid:
                    # retrieve indices
                    try:
                        idx2 = (h[i]/ vv[1] ).astype( np.uint32 )
                        val.append( vv[0][idx2[:,0],idx2[:,1],idx2[:,2]] )
                    except IndexError as e:
                        print("Index error occurred when trying to extract scalar values for measure", k)
                        print("--> Discard fiber with index", i, "Exception: ", e)
                        print("----")

                da = np.concatenate( val )
                di[k + '_mean'] = float(da.mean())
                di[k + '_std'] = float(da.std())
                del da
                del val

            G.add_edge(u,v, di)

        # storing network
        if 'gPickle' in output_types:
            nx.write_gpickle(G, 'connectome_%s.gpickle' % parkey)
        if 'mat' in output_types:
            # edges
            size_edges = (parval['number_of_regions'],parval['number_of_regions'])
            edge_keys = G.edges(data=True)[0][2].keys()
            
            edge_struct = {}
            for edge_key in edge_keys:
                edge_struct[edge_key] = nx.to_numpy_matrix(G,weight=edge_key)
                
            # nodes
            size_nodes = parval['number_of_regions']
            node_keys = G.nodes(data=True)[0][1].keys()

            node_struct = {}
            for node_key in node_keys:
                if node_key == 'dn_position':
                    node_arr = np.zeros([size_nodes,3],dtype=np.float)
                else:
                    node_arr = np.zeros(size_nodes,dtype=np.object_)
                node_n = 0
                for _,node_data in G.nodes(data=True):
                    node_arr[node_n] = node_data[node_key]
                    node_n += 1
                node_struct[node_key] = node_arr
                
            scipy.io.savemat('connectome_%s.mat' % parkey, mdict={'sc':edge_struct,'nodes':node_struct})
        if 'graphml' in output_types:
            g2 = nx.Graph()
            for u_gml,v_gml,d_gml in G.edges_iter(data=True):
                g2.add_edge(u_gml,v_gml,d_gml)
            for u_gml,d_gml in G.nodes(data=True):
                g2.add_node(u_gml,{'dn_correspondence_id':d_gml['dn_correspondence_id'],
                               'dn_fsname':d_gml['dn_fsname'],
                               'dn_hemisphere':d_gml['dn_hemisphere'],
                               'dn_name':d_gml['dn_name'],
                               'dn_position_x':float(d_gml['dn_position'][0]),
                               'dn_position_y':float(d_gml['dn_position'][1]),
                               'dn_position_z':float(d_gml['dn_position'][2]),
                               'dn_region':d_gml['dn_region']})
            nx.write_graphml(g2,'connectome_%s.graphml' % parkey)

        print("Storing final fiber length array")
        fiberlabels_fname  = 'final_fiberslength_%s.npy' % str(parkey)
        np.save(fiberlabels_fname, final_fiberlength_array)

        print("Storing all fiber labels (with orphans)")
        fiberlabels_fname  = 'filtered_fiberslabel_%s.npy' % str(parkey)
        np.save(fiberlabels_fname, np.array(fiberlabels, dtype = np.int32), )

        print("Storing final fiber labels (no orphans)")
        fiberlabels_noorphans_fname = 'final_fiberlabels_%s.npy' % str(parkey)
        np.save(fiberlabels_noorphans_fname, final_fiberlabels_array)

        if not streamline_wrote:
            print("Filtering tractography - keeping only no orphan fibers")
            finalfibers_fname = 'streamline_final.trk'
            save_fibers(hdr, fib, finalfibers_fname, final_fibers_idx)
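
# For reference, a minimal sketch (not the pipeline's own code) of how the
# dense matrices in the 'mat' export above come from the graph's edge
# attributes, assuming the NetworkX 1.x-era API used throughout; the toy
# graph and attribute values below are hypothetical.
import networkx as nx

G_demo = nx.Graph()
G_demo.add_edge(1, 2, number_of_fibers=10, fiber_length_mean=42.0)
G_demo.add_edge(2, 3, number_of_fibers=4, fiber_length_mean=55.5)

for key in ('number_of_fibers', 'fiber_length_mean'):
    # one regions x regions matrix per scalar edge measure
    M = nx.to_numpy_matrix(G_demo, weight=key)
    print(key, M.shape)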
Exemplo n.º 23
0
# first fiber, first point, x,y,z
# fibers[0][0,:]
# first fiber, last point, x,y,z
# fibers[0][-1,:]

def test(a):
    print "me", a
#test("test")
  
print "number of fibers", len(fibers)

# Translate from mm to index
# endpoints[i,0,0] = int( endpoints[i,0,0] / float(voxelSize[0]))

# convert the first fiber to a sequence of voxel indices i,j,k
# idx = (fibers[0] / np.array([1,1,1]) ).astype(np.int32)

# scalar values along first fiber
# myvolumedata[idx[:,0],idx[:,1],idx[:,2]]

# compute length of first fiber
length(fibers[0])

# return indices of all the fibers connecting ROIs 64 and 78 (label value 64.78)
idx = np.where(mylabelarray == 64.78)[0]

for i in idx:
    myfiber = fibers[i]
    # happy computing
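
# A runnable mini-version of the walkthrough above (a sketch with made-up
# stand-ins: the fiber coordinates, voxel size, and scalar volume below are
# hypothetical, not data from this repository).
import numpy as np

fiber_mm = np.array([[1.2, 3.4, 5.6],
                     [2.2, 3.9, 5.1],
                     [3.1, 4.4, 4.8]])   # one fiber: 3 points in mm
voxel_size = np.array([1.0, 1.0, 1.0])   # isotropic 1 mm voxels
volume = np.random.rand(10, 10, 10)      # stand-in scalar map (e.g. FA)

# mm -> voxel indices, as in the commented conversion above
vox_idx = (fiber_mm / voxel_size).astype(np.int32)

# scalar values along the fiber, as in the myvolumedata line above
values = volume[vox_idx[:, 0], vox_idx[:, 1], vox_idx[:, 2]]
print("mean scalar along fiber:", values.mean())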

# matrix F: voxels x fibers
Exemplo n.º 24
0
def DC_location_correlation(Rx_Buffer):
    #   Detecting downchirp

    SF = param_configs(1)
    BW = param_configs(2)
    Fs = param_configs(3)
    N = int(2**SF)
    num_DC = param_configs(6)
    # thresholds
    corr_threshold = param_configs(8)  # threshold above which we extract all correlation peaks
    pnts_threshold = param_configs(9)  # max. number of peaks to extract from the correlation plot

    DC = np.conj(sym_to_data_ang([1], N))

    Downchirp_ind = []
    Cross_Corr = []

    st = time.perf_counter()
    ###################################################
    # Cross Correlation with a Single downchirp
    for i in range(length(Rx_Buffer) - length(DC) - 1):
        Cross_Corr.append(np.sum(Rx_Buffer[ i : i + N ] * DC.conj()) \
                / math.sqrt(np.sum( Rx_Buffer[ i : i + N ] * Rx_Buffer[ i : i + (N) ].conj() ) *
                np.sum( DC * DC.conj())))
    Cross_Corr = np.array(Cross_Corr)
    Cross_Corr = Cross_Corr[np.isfinite(Cross_Corr)]
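    # adaptive threshold: 4x the mean absolute correlation, overriding the
    # param_configs(8) value loaded above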
    corr_threshold = 4 * np.sum(np.abs(Cross_Corr)) / length(Cross_Corr)
    ###################################################
    n_samp_array = []
    peak_ind_prev = np.array([])
    for i in range(math.floor(length(Cross_Corr) / N)):
        # windowing Cross-Correlation (window length N samples)
        wind = np.abs(Cross_Corr[i * N:(i + 1) * N])
        # Extract Multiple Correlation Peaks
        peak_ind_curr = get_max(wind, corr_threshold, pnts_threshold)
        if (length(peak_ind_prev) != 0 and length(peak_ind_curr) != 0):
            for j in range(length(peak_ind_curr)):
                for k in range(length(peak_ind_prev)):
                    # check whether any two peaks in consecutive windows are N samples apart
                    if (peak_ind_curr[j] == peak_ind_prev[k]):
                        n_samp_array += [
                            peak_ind_prev[k] + ((i - 1) * N) + 1,
                            peak_ind_curr[j] + ((i) * N) + 1
                        ]
                    # This extracts a list of all peaks that are N samples apart
        peak_ind_prev = peak_ind_curr

    n_samp_array = np.array(n_samp_array)
    for i in range(length(n_samp_array)):
        c = 0
        ind_arr = [n_samp_array[i], n_samp_array[i] + N]

        for j in range(len(ind_arr)):
            c = c + np.sum(n_samp_array == ind_arr[j])
        # Find from the list all the peaks that appear consecutively for
        # more than 2 windows (a downchirp should give 3 peaks, N samples apart)

        if (c >= 2):
            Downchirp_ind += [ind_arr]
    # filter downchirps that are within 3 samples of each other
    # (same pkt detected twice due to peak energy spread)
    temp = []
    indices = [np.zeros(math.floor(num_DC))] + Downchirp_ind
    indices = np.array(indices)
    for i in range(1, indices.shape[0]):
        if (len(temp) == 0):
            temp.append(indices[i])
        else:
            # indices[np.unravel_index(i, indices.shape, 'F')] reduces to
            # indices[i][0] for the i values in this loop, so index directly
            if np.min(np.abs(indices[i][0] - np.array(temp)[:, 0])) > 3:
                temp.append(indices[i])
    Downchirp_ind = np.array(temp)
    return Downchirp_ind
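
# A vectorized equivalent of the correlation loop above (a sketch, not the
# repository's code): np.correlate conjugates its second argument, and a
# moving sum of |rx|^2 gives the per-window energy for the normalization.
import numpy as np

def sliding_norm_corr(rx, ref):
    # numerator: sum(rx[i:i+N] * ref.conj()) at every lag i
    num = np.correlate(rx, ref, mode='valid')
    # per-window energy of rx via a moving sum of |rx|^2
    energy = np.convolve(np.abs(rx) ** 2, np.ones(len(ref)), mode='valid')
    return num / np.sqrt(energy * np.sum(np.abs(ref) ** 2))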
Exemplo n.º 25
0
def UC_location_corr_DC_based(Data,DC_ind):
    # chirp variables
    SF = param_configs(1)
    BW = param_configs(2)
    Fs = param_configs(3)
    N = 2**SF
    upsampling_factor = Fs/BW

    # LORA pkt variables
    num_preamble = param_configs(4)
    num_sync = param_configs(5)
    num_DC = param_configs(6)
    num_data_sym = param_configs(7)
    # thresholds
    corr_threshold = param_configs(10)
    pnts_threshold = param_configs(11)

    DC = np.conj(sym_to_data_ang([1],N))

    ####################################
    if len(DC_ind) == 0:
        # no downchirps detected; return an empty list so the return type stays consistent
        return []
    ## Find list of Potential Preambles from list of Downchirps Detected
    pot_pream_ind = []
    for i in range(len(DC_ind)):
        if DC_ind[i,0] - ((num_preamble+num_sync)*N) < 1:
            continue
        pot_pream_ind.append(np.arange(DC_ind[i,0] - ((num_preamble + num_sync)*N), DC_ind[i,0]- ((num_sync)*N) + 1, N))
    pot_pream_ind = np.array(pot_pream_ind)

    Upchirp_ind = []

    #  Cross Correlation with a Single UpChirp
    temp_wind = []
    for j in range(pot_pream_ind.shape[0]):
        if pot_pream_ind[j,0] - N <= 0:
            continue
        Data_buffer = Data[int(pot_pream_ind[j,0] - N) : int(pot_pream_ind[j,-1] + N)]
        temp = [0+0j]
        for i in range(length(Data_buffer) - length(DC)):
            temp.append(np.sum(np.multiply(Data_buffer[i + 1 : i + N + 1], DC[:N])) \
                / math.sqrt(np.sum(Data_buffer[i + 1: i + N + 1] * Data_buffer[i + 1 : i + N + 1].conj() ) * \
                np.sum( DC[:N] * DC[:N].conj())))
        temp_wind.append(temp)
    temp_wind = np.array(temp_wind)

    array_stack = []

    # iterate over each Downchirp Detected
    for m in range(temp_wind.shape[0]):
        
        n_samp_array = []
        peak_ind_prev = np.array([])
        for i in range(math.floor(length(temp_wind)/N)):
            # windowing the cross-correlation array corresponding to each pkt (window length N samples)
            wind = abs(temp_wind[m,i*N + 1 : (i+1) * N])
            peak_ind_curr = get_max(wind,corr_threshold,pnts_threshold)

            if length(peak_ind_prev) != 0 and length(peak_ind_curr) != 0:

                for j in range(length(peak_ind_curr)):
                    for k in range(length(peak_ind_prev)):
                        # check whether any two peaks in consecutive windows are N samples apart
                        if abs(peak_ind_curr[j]) == abs(peak_ind_prev[k]):
                            n_samp_array.append(peak_ind_prev[k]+((i-1)*N)+(pot_pream_ind[m,0]-N-1) + 3) # add 3 to account for zero indexing
                        # This extracts a list of all peaks that are N samples apart
            peak_ind_prev = peak_ind_curr
        array_stack.append(n_samp_array)
    array_stack = np.array(array_stack)

    for m in range(len(array_stack)):
        n_samp_array = np.array(array_stack[m])
        
        for i in range(length(n_samp_array)):
            c = 0
            ind_arr = np.arange(n_samp_array[i] + N, n_samp_array[i] + N + (num_preamble-2)*N + 1, N)
            for j in range(len(ind_arr)):
                c = c + np.sum( n_samp_array == ind_arr[j] )
            # Find from the list all the peaks that appear consecutively for
            # more than 6 windows (an upchirp should give 8 peaks, N samples apart)
            if c >= 6:
                if len(Upchirp_ind) != 0:
                    if np.sum(np.array(Upchirp_ind)[:,0] == n_samp_array[i]) != 1:
                        Upchirp_ind.append(np.concatenate([[n_samp_array[i]],ind_arr]))
                else:
                    Upchirp_ind.append(np.concatenate([[n_samp_array[i]], ind_arr]))
    # filter upchirps that are within 5 samples of each other (same pkt detected multiple times due to peak energy spread)
    temp = []
    Upchirp_ind = np.array(Upchirp_ind)
    indices = np.concatenate([np.zeros((1,num_preamble)), Upchirp_ind])
    for i in range(1, indices.shape[0]):
        if len(temp) == 0:
            temp.append(indices[i,:])
        else:
            if min(abs(indices[i][0] - np.array(temp)[:,0])) > 5:
                temp.append(indices[i,:])
    temp = np.array(temp)
    Upchirp_ind = temp

    return [Upchirp_ind]
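
# Hypothetical end-to-end usage of the two detectors above (Rx_Buffer is an
# assumed complex-baseband sample array; it is not defined in this excerpt):
# DC_ind = DC_location_correlation(Rx_Buffer)
# result = UC_location_corr_DC_based(Rx_Buffer, DC_ind)
# if result:
#     Upchirp_ind = result[0]
#     print("preamble starts found:", Upchirp_ind.shape[0])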