Example #1
def test_slow():
    if False:
        a = load_merfish(fname)
        assert not is_sorted(a['cellID'])
        cell_order = np.argsort(a['cellID'])
        assert not is_sorted(cell_order)
        assert is_sorted(a['cellID'][cell_order])
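Every example on this page calls an is_sorted helper whose definition is not shown here. A minimal sketch of such a helper, assuming a non-strict (non-decreasing) check over 1-D sequences and NumPy arrays, could look like the following; the actual helpers used by these repositories may differ.

import numpy as np

def is_sorted(a):
    """Return True if the 1-D sequence `a` is in non-decreasing order."""
    a = np.asarray(a)
    # An empty or single-element sequence is trivially sorted.
    return bool(np.all(a[:-1] <= a[1:]))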
Example #2
    def __init__(self, src_id, seed, change_times, rates):
        """Creates a broadcaster which tweets with the given rates."""
        super(PiecewiseConst, self).__init__(src_id, seed)

        assert is_sorted(change_times)

        self.change_times = change_times
        self.rates = rates

        self.init = False
        self.times = None
        self.t_diff = None
        self.start_idx = None
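A hypothetical usage sketch for the constructor above (the argument values and variable names are illustrative, not taken from the original repository). The key requirement is that change_times is sorted, otherwise the assertion in __init__ fails:

import numpy as np

change_times = np.array([0.0, 10.0, 25.0])   # sorted piece boundaries
rates = np.array([0.5, 2.0, 0.1])            # one tweet rate per piece
broadcaster = PiecewiseConst(src_id=1, seed=42,
                             change_times=change_times, rates=rates)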
Example #3
    def build(self):
        idx = np.array(self.included_nodes, dtype=int)
        assert is_sorted(idx)

        # Define matrix and block sizes
        block_size = idx.size  # Number of included nodes
        block_num = self.num_actions+1
        N = block_size*block_num # Total matrix size

        # First block of q is the state weight vector
        q = np.empty(N)
        q[:block_size] = -self.state_weights[idx]

        # Build the other blocks
        row = []
        col = []
        data = []
        for block in range(1, block_num):
            shift = block*block_size
            action = block - 1
            # Get the E block (I - g P)
            # Immediately slice off omitted nodes
            E = self.mdp.get_E_matrix(action)
            E = self.contract_sparse_square_matrix(E)

            # Add both E and -E.T to the growing COO structures
            row.extend([E.row,E.col + shift])
            col.extend([E.col + shift,E.row])
            data.extend([E.data,-E.data])

            # Add included costs to the q block
            q[shift:(shift+block_size)] = self.mdp.costs[action][idx]

        # Add regularization
        row.append(np.arange(N))
        col.append(np.arange(N))
        data.extend([self.val_reg*np.ones(block_size),
                     self.flow_reg*np.ones((block_num-1)*block_size)])

        # Concat COO structures into single vectors
        row = np.concatenate(row)
        col = np.concatenate(col)
        data = np.concatenate(data)
        
        # Build the COO matrix
        M = sps.coo_matrix((data,(row,col)),
                           shape=(N,N),dtype=np.double)

        # Assemble into LCP object; return
        return LCPObj(M,q)
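Reading off the COO assembly above, the matrix M and vector q appear to have the following block structure (a sketch inferred from the code, not documented in the original):

# M is (block_num x block_num) blocks, each of size block_size:
#
#         [  val_reg*I    E_1          E_2         ... ]
#         [ -E_1.T        flow_reg*I   0           ... ]
#     M = [ -E_2.T        0            flow_reg*I  ... ]
#         [  ...          ...          ...         ... ]
#
# where E_a = (I - g*P_a) restricted to the included nodes, and
#
#     q = [ -state_weights[idx], costs[0][idx], costs[1][idx], ... ]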
Example #4
    def __call__(self, p):
        self.model.p = p[:-1]
        distances = list()
        for component in self.model._components:
            distances.append(np.sqrt(component.p[1] ** 2. +
                                     component.p[2] ** 2.))
        if not is_sorted(distances):
            print "Components are not sorted:("
            return -np.inf
        lnpr = list()
        for component in self.model._components:
            lnpr.append(component.lnpr)
        lnpr.append(sp.stats.uniform.logpdf(p[-1], 0, 2))

        return sum(lnpr)
Example #5
    def __call__(self, p):
        self.model.p = p[:]
        distances = list()
        for component in self.model._components:
            distances.append(np.sqrt(component.p[1]**2. + component.p[2]**2.))
        if not is_sorted(distances):
            print("Components are not sorted:(")
            return -np.inf
        lnpr = list()
        for component in self.model._components:
            # This is implemented in ``Model.p``
            # component.p = p[:component.size]
            # p = p[component.size:]
            #print "Got lnprior for component : ", component.lnpr
            lnpr.append(component.lnpr)

        return sum(lnpr)
Example #6
def load_cells(fname: str, cell_ids: List, verbose=True):
    cell_ids = [int(i) for i in cell_ids]

    if verbose:
        print("Analyzing", basename(fname))
    df = load_merfish(fname)

    df_cell_ids = np.array(df["cellID"])
    mask = df_cell_ids == cell_ids[0]
    for i in cell_ids[1:]:
        mask |= df_cell_ids == i
    df = df[mask]
    df.sort(order='cellID')
    assert is_sorted(df['cellID'])
    if verbose:
        print(f"Selected {len(df):,d} points")
    return df
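The mask-building loop in load_cells could equivalently be written with np.isin; a brief alternative sketch (not what the original repository does):

import numpy as np

# Equivalent to OR-ing one equality mask per requested cell id.
mask = np.isin(df_cell_ids, cell_ids)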
Example #7
    def __init__(self,node_lists):
        self.dim = len(node_lists)
        self.node_lists = np.array(node_lists)
        # List of np.ndarray cutpoint locations

        for nl in node_lists:
            assert nl.ndim == 1 # 1D array
            assert nl.size >= 2 # At least two nodes
            assert is_sorted(nl)
        
        # Number of cutpoints along each dimension
        desc = [(nl[0],nl[-1],nl.size) for nl in node_lists]
        (low,hi,num) = zip(*desc)
        self.lower_bound = np.array(low)
        self.upper_bound = np.array(hi)
        self.num_nodes = np.array(num)
        
        self.num_cells = self.num_nodes - 1

        # Initialize the indexer
        self.indexer = Indexer(self.num_nodes)

        # Fuzz to convert [low,high) to [low,high]
        self.fuzz = 1e-12
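A hypothetical usage sketch for the constructor above (the class name RegularGrid is a placeholder; the real class name is not shown in this snippet). Each node list must be a sorted 1-D array with at least two entries:

import numpy as np

x_nodes = np.array([0.0, 0.5, 1.0])   # 3 sorted nodes -> 2 cells
y_nodes = np.array([-1.0, 0.0, 2.0])  # 3 sorted nodes -> 2 cells
grid = RegularGrid([x_nodes, y_nodes])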
Example #8
    def test_is_sorted(self):
        sorted_list = [2, 3, 6, 7]
        unsorted_list = [3, 2, 6, 7]
        self.assertTrue(utils.is_sorted(sorted_list))
        self.assertFalse(utils.is_sorted(unsorted_list))
Example #9
    def test_sorting(self):
        """Tests if the list is sorted"""
        items = [21, 4, 1, 3, 9, 20, 25, 6, 21, 14]
        merge_sort.iterative(items)

        self.assertTrue(utils.is_sorted(items))
Example #10
def get_user_data_for(user_id):
    hs = get_user_repository()

    try:
        user_tweet_times = hs.get_user_tweets(user_id)

        assert is_sorted(user_tweet_times), "User tweet times were not sorted."
        # TODO: This should ideally be the last 2 months instead of tweeting history of
        # the broadcaster. Or ....
        # Is 2 months now.

        first_tweet_time, last_tweet_time = user_tweet_times[0], user_tweet_times[-1]
        exp_times = get_start_end_time()
        start_time = exp_times.start_time  # GMT: Wed, 01 Jul 2009 00:00:00 GMT
        end_time = exp_times.end_time  # GMT: Tue, 01 Sep 2009 00:00:00 GMT

        if last_tweet_time < start_time:
            # This user did not tweet in the relevant period of time
            return (user_id, None)

        # user_intensity = calc_avg_user_intensity(user_tweet_times,
        #                                          start_time,
        #                                          end_time)

        # These are the ids of the sink
        user_followers = hs.get_user_followers(user_id)
        user_relevant_followers = []
        log("Num followers of {} = {}".format(user_id, len(user_followers)))
        edge_list, sources = [], []

        for idx, follower_id in enumerate(user_followers):
            followees_of_follower = len(hs.get_user_followees(follower_id))
            if followees_of_follower < 500:
                # Skip followers with 500 or more followees (do not build their
                # walls); this discards about 30% of all users.
                user_relevant_followers.append(follower_id)
                wall = hs.get_user_wall(follower_id, excluded=user_id)
                follower_source = make_real_data_broadcaster(
                    follower_id, wall, start_time, end_time)
                log("Wall of {} ({}/{}) has {} tweets, {} relevant".format(
                    follower_id, idx + 1, len(user_followers), len(wall),
                    follower_source.get_num_events()))
                # The source for follower_id has the same ID
                # There is one source per follower which produces the tweets
                sources.append(follower_source)
                edge_list.append((follower_id, follower_id))

        # The source user_id broadcasts tweets to all its followers
        edge_list.extend([(user_id, follower_id)
                          for follower_id in user_relevant_followers])

        other_source_params = [('RealData', {
            'src_id': x.src_id,
            'times': x.times
        }) for x in sources]

        sim_opts = SimOpts(edge_list=edge_list,
                           sink_ids=user_relevant_followers,
                           src_id=user_id,
                           q_vec=def_q_vec(len(user_relevant_followers)),
                           s=1.0,
                           other_sources=other_source_params,
                           end_time=scaled_period)

        return (user_id, (sim_opts,
                          scale_times(user_tweet_times, start_time, end_time,
                                      scaled_period)))
    except Exception as e:
        print('Encountered error', e, ' for user {}'.format(user_id))
        return user_id, None
    finally:
        hs.close()
Example #11
    def test_sorting(self):
        """Tests if the items are sorted"""
        items = [21, 4, 1, 3, 9, 20, 25, 6, 21, 14]
        quick_sort.recursive(items)

        self.assertTrue(utils.is_sorted(items))
Example #12
def test_is_sorted2():
    a = np.array([0, 2, 2], dtype=np.uint8)
    assert is_sorted(a)
Example #13
def test_is_sorted1():
    a = np.array([1, 2, 0], dtype=np.uint8)
    assert not is_sorted(a)