Example #1
    def advance_sim(self, n_steps: int) -> None:
        """Advance the simulation `n_steps` into the future."""
        for _ in range(n_steps):
            for moon_a, moon_b in distinct_combinations(self.moons, 2):
                moon_a.apply_grav(moon_b)

            for moon in self.moons:
                moon.step_velocity()

            self._timestep += 1
            self.save_state()
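
For reference, a minimal sketch (moon names are illustrative, not from the original project) of the pairing behaviour the inner loop relies on: with distinct elements, distinct_combinations yields each unordered pair exactly once, just like itertools.combinations, so every pair of moons interacts once per step.

from more_itertools import distinct_combinations

moons = ["Io", "Europa", "Ganymede", "Callisto"]
for moon_a, moon_b in distinct_combinations(moons, 2):
    print(moon_a, moon_b)  # 6 distinct pairs for 4 moons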
Example #2
def is_square(combination, epsilon=1):
    """ Return whether or not a set of four lines makes a square, and the side length.
    Each line should be in polar coordinates in the format (angle, dist).
    
    Epsilon here is the amount of difference allowed in the angle of the lines
    to decide it they're parallel, measured in degrees.
    
    
    Return tuple (square, size,)
    square = bool whether or not the lines form a square.
    size = the side length of the square. 0 if not a square.
    
    """
    # Compare the four lines pairwise.
    line_comps = list(distinct_combinations(combination,2))
    n_parallel = 0
    n_perpendicular = 0
    dists = []
    for comp in line_comps:
        # Get the angle between the lines.
        
        # First check the signs on the angles to the origin.       
        # If the sign is the same, take the abs of the difference.
        if np.sign(comp[0][0]) == np.sign(comp[1][0]):
            angle = abs(comp[0][0] - comp[1][0])
        # If the sign is different, sum the abs vals.
        else:
            angle = abs(comp[0][0]) + abs(comp[1][0])

        # We want to know if they're perpendicular or parallel.
        if almostEqual(comp[0][0], comp[1][0], epsilon):
            # They're parallel!
            n_parallel += 1
            # We want to know the distance between these lines.
            dists.append(abs(comp[0][1] - comp[1][1]))
        elif almostEqual(angle,np.pi/2, epsilon):
            # They're perpendicular!
            n_perpendicular += 1
        else:
            # These are neither parallel nor perpendicular.
            # This whole combination can be discarded.
            return False, 0

    if n_parallel == 2 and n_perpendicular == 4 and len(dists) == 2:
        # This is a rectangle.
        # Check if it's a square. Allow +/- 5%
        if abs(dists[0] - dists[1]) <= min(dists)*0.05:
            # This is a square. Return True and the size.
            return True, np.mean((dists[0],dists[1]))
    return False, 0
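
A small sketch of the comparison set this function iterates over, using made-up polar lines (roughly a unit square, not from the original data): four lines give six distinct pairs, which is what the n_parallel/n_perpendicular tallies above are taken over.

import numpy as np
from more_itertools import distinct_combinations

# (angle, dist) in polar form; values are illustrative only.
lines = [(0.0, 0.0), (0.0, 1.0), (np.pi / 2, 0.0), (np.pi / 2, 1.0)]
for (a1, d1), (a2, d2) in distinct_combinations(lines, 2):
    print(f"angle diff: {abs(a1 - a2):.3f}  dist diff: {abs(d1 - d2):.3f}")
# 6 comparisons: 2 parallel pairs and 4 perpendicular pairs, as required for a square.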
Example #3
    def __init__(self, task_config, fft_config, ecei_config):
        """Initialize the object with a fixed channel list, a fixed name of the analysis to be performed
        and a fixed set of parameters for the analysis routine.

        Inputs:
        =======
        task_config: dict, defines parameters of the analysis to be performed
        fft_config: dict, gives parameters of the fourier-transformed data
        ecei_config: dict, information on the ECEI diagnostic
        """

        # Stores the description of the task. This can be arbitrary
        self.description = task_config["description"]
        # Stores the name of the analysis we are going to execute
        self.analysis = task_config["analysis"]

        # Parse the reference and cross channels.
        self.ref_channels = channel_range.from_str(task_config["ref_channels"])
        # These channels serve as the cross-data for the spectral diagnostics
        self.cmp_channels = channel_range.from_str(task_config["cmp_channels"])

        self.task_config = task_config
        self.fft_config = fft_config
        self.ecei_config = ecei_config

        self.storage_scheme = {
            "ref_channels": self.ref_channels.to_str(),
            "cmp_channels": self.cmp_channels.to_str()
        }

        # Construct a list of unique channels
        # F.ex. we have ref_channels [(1,1), (1,2), (1,3)] and cmp_channels = [(1,1), (1,2)]
        # The unique list of channels is then
        # (1,1) x (1,1), (1,1) x (1,2)
        # (1,2) x (1,2) !!! Omit (1,2) x (1,1)
        # (1,3) x (1,1)
        # (1,3) x (1,2)
        channel_pairs = [
            channel_pair(cr, cx) for cr in self.ref_channels
            for cx in self.cmp_channels
        ]
        # Make a list, so that we don't exhaust the iterator after the first call.
        self.unique_channels = list(
            more_itertools.distinct_combinations(channel_pairs, 1))
        self.channel_chunk_size = task_config["channel_chunk_size"]
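
The deduplication here relies on distinct_combinations with r=1, which yields each distinct element (by equality) exactly once, wrapped in a 1-tuple; channel_pair is presumably defined so that reversed pairs compare equal. A standalone sketch using frozensets as a stand-in for channel_pair:

from more_itertools import distinct_combinations

# frozenset stands in for channel_pair, which is assumed to treat (a, b) and (b, a) as equal.
pairs = [frozenset(p) for p in [((1, 1), (1, 2)), ((1, 2), (1, 1)), ((1, 1), (1, 3))]]
unique = [t[0] for t in distinct_combinations(pairs, 1)]
print(len(pairs), len(unique))  # 3 2 -- the reversed duplicate is dropped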
Example #4
def _generate_cooc_graph(df_corpus, size_min, item):
    """The `_generate_cooc_graph` function builds a co-occurrence networkx object `G(N,E)` 
    out of the dataframe `df_corpus` composed of two columns : 
    `pub_id` (article identifier) and `item` (item value).
       
    Example:
        ========= =======
         pub_id    item    
        ========= =======    
             0      item1  
             0      item2       
             1      item1     
             1      item1     
             1      item3      
             1      item3     
             2      item4      
             2      item5   
             2    unknown 
        ========= =======
    
    First, `df_corpus` is cleaned by eliminating duplicated rows and rows with the item value `unknown`.
    This results in:
        ========= =======
         pub_id    item    
        ========= =======    
           0      item1  
           0      item2    
           1      item1    
           1      item3          
           2      item4     
           2      item5  
        ========= =======     
    
    The set of nodes `N` is the set of the items `{item1,item2,item3,...}`.
    The set of edges `E` is the set of tuples `{(item_i,item_j),...}` where:   
          1.  `item_i` and `item_j` are related to the same `pub_id`;
          2.  `item_i` and `item_j` are different;
          3.  `(item_i,item_j)` and `(item_j,item_i)` are equivalent.
     
    This means:
          `N = {item1,item2,item3,item4,item5}`
          
          `E={(item1,item2),(item1,item3),(item4,item5)}`.
     
    The size of the node associated with `item_i` is the number of occurrences of `item_i`,
    which should be >= `size_min`. So we have:
    
        size of `item1` node is 2
        
        size of `item2` node is 1
     
    The weight `w_ij` of an edge is the number of occurrences of the tuple `(item_i,item_j)` in the
    list of tuples `[(item_i,item_j),...]` where: 
         1.  `item_i` and `item_j` are related to the same `pub_id`;
         2.  `item_i` and `item_j` are different; 
         3.  `(item_i,item_j)` and `(item_j,item_i)` are equivalent.
     
    The nodes have one ID and two attributes: the size of the node and its label.
    If `item = "CU"`, the longitude and latitude (in degrees) of the country capital
    are added as node attributes to be compatible with the Geo Layout of Gephi.
     
    The edges have two attributes: the edge weight `w_ij` and its Kessler similarity `kess_ij`.
    The Kessler similarity of the edge between the nodes `node_i` and `node_j` is defined as:
    
    .. math:: kess_{ij} = \\frac{w_{ij}}{\\sqrt{size(node\_i) . size(node\_j)}} 
    
    Args:
        df_corpus (dataframe): dataframe structured as `|pub_id|item|`.
        size_min (int): minimum size of the nodes to be kept (default: 1).
        item (str): item label (ex: "AU", "CU") of which co-occurrence graph is generated.

    Returns:
        `networkx object`: co-occurrence graph `G` of the item `item`; 
                          `G=None` if the graph has only one node.
        
    """

    # Standard library import
    import math
    from collections import defaultdict

    # 3rd party import
    import networkx as nx
    from more_itertools import distinct_combinations

    # Local imports
    from .BiblioGeneralGlobals import COUNTRIES_GPS

    #                           Cleaning of the dataframe
    # -----------------------------------------------------------------------------------------
    df_corpus.drop_duplicates(inplace=True)  # Keep a unique occurrence of an item per article
    df_corpus.drop(index=df_corpus[df_corpus["item"] == "unknown"].index,
                   inplace=True)  # Drop rows with "unknown" items

    dg = df_corpus.groupby("item").count().reset_index()  # Number of occurrences of each item
    dg.columns = ["item", "count"]
    # List of items with fewer than size_min occurrences
    labels_to_drop = dg.query("count<@size_min")["item"].to_list()
    index_to_drop = [
        x[0] for x in zip(df_corpus.index, df_corpus["item"])
        if x[1] in labels_to_drop
    ]
    df_corpus.drop(index_to_drop, inplace=True)  # Cleaning of the dataframe

    #                 Building the set of nodes and the set of edges
    # -----------------------------------------------------------------------------------------
    df_corpus.columns = ["pub_id", "item"]
    nodes_id = list(set(df_corpus["item"]))  # The distinct items
    dic_nodes = dict(zip(nodes_id, range(len(nodes_id))))  # item -> integer node id
    dic_size = dict(zip(dg["item"], dg["count"]))  # item -> number of occurrences
    nodes_size = {dic_nodes[x]: dic_size[x] for x in nodes_id}  # node id -> node size

    del dg, nodes_id, dic_size

    if len(nodes_size) < 2:  # Don't build a graph with fewer than two nodes
        G = None
        del df_corpus, dic_nodes

    else:
        list_edges = []
        weight = defaultdict(int)
        for group_by_pub_id in df_corpus.groupby("pub_id"):
            for edges in list(
                    distinct_combinations(group_by_pub_id[1]["item"].to_list(),
                                          2)):
                if edges:
                    edge = (dic_nodes[edges[0]], dic_nodes[edges[1]])
                    if edge not in list_edges:
                        list_edges.append(edge)
                        weight[edge] = 1
                    else:
                        weight[edge] += 1
        del df_corpus
        #                            Building the networkx graph object G
        # -------------------------------------------------------------------------------------
        G = nx.Graph()

        G.add_nodes_from(dic_nodes.values())
        nx.set_node_attributes(G, nodes_size, "node_size")
        nodes_label = dict(zip(dic_nodes.values(), dic_nodes.keys()))
        del dic_nodes

        nx.set_node_attributes(G, nodes_label, "label")
        if item == "CU":
            lat, lon = map(
                list,
                zip(*[COUNTRIES_GPS[nodes_label[node]] for node in G.nodes]))
            lat_dict = dict(zip(G.nodes, lat))
            lon_dict = dict(zip(G.nodes, lon))
            nx.set_node_attributes(G, lat_dict, "lat")
            nx.set_node_attributes(G, lon_dict, "lon")
            del lat_dict, lon_dict

        G.add_edges_from(list_edges)
        nx.set_edge_attributes(G, weight, "nbr_edges")
        kess = {}
        # Compute the Kessler similarity between node edge[0] and node edge[1]
        for edge in list_edges:
            kess[edge] = weight[edge] / math.sqrt(
                nodes_size[edge[0]] * nodes_size[edge[1]])
        nx.set_edge_attributes(G, kess, "kessler_similarity")
        del list_edges, weight, nodes_label

    del nodes_size

    return G
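
A compact sketch of the edge-building step above, run on the docstring's toy corpus (the column names match the cleaned dataframe; the defaultdict accumulation condenses the list_edges/weight bookkeeping):

from collections import defaultdict

import pandas as pd
from more_itertools import distinct_combinations

df = pd.DataFrame({"pub_id": [0, 0, 1, 1, 2, 2],
                   "item": ["item1", "item2", "item1", "item3", "item4", "item5"]})
weight = defaultdict(int)
for _, group in df.groupby("pub_id"):
    for edge in distinct_combinations(group["item"].to_list(), 2):
        weight[edge] += 1
print(dict(weight))
# {('item1', 'item2'): 1, ('item1', 'item3'): 1, ('item4', 'item5'): 1}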
Example #5
    # Quantity matters. Let's count them and decide what to do with the image
    # based on the number of lines.
    n_lines = len(angle)
    count_ok = 3 < n_lines < 9

    #%%% Check for squares and get the scale

    # Now, if we have a decent shot at success, we want to try to find a square.
    if count_ok:

        # Associate angle and distance in a way that will be easier to iterate
        lines = [(a, r) for a, r in zip(angle, ro)]

        # Check each distinct combination of 4 lines to see if they make a square
        combs = list(more_itertools.distinct_combinations(lines, 4))

        # Ok here we go.
        outputs = []
        square_combs = []
        for comb in combs:
            square, size = cp.is_square(comb)
            outputs.append((square, size))
            if square:
                square_combs.append(comb)

        # If any of those combinations made a square, we'll get a True in the output.
        if any([out[0] for out in outputs]):

            # Measure them
            square_sizes = [out[1] for out in outputs if out[0]]
Example #6
    t_raster_temp = np.absolute(t_raster - targetT[j])
    ind_temp = np.where(t_raster_temp == t_raster_temp.min())[1][0]
    LFP_cut.append(New_LFP[j, ind_temp - tbefTar:ind_temp + taftTar])
    # LFP_cut.append(New_LFP[j,ind_temp-tbefTar:ind_temp]-np.flipud(New_LFP[j,ind_temp+1:ind_temp+taftTar+1]))

LFP_cut = np.array(LFP_cut)

# set input and output
AllXX = LFP_cut
Allyy = np.ravel(Allyy)

uniNerLab, Labcounts = np.unique(AllneuronLab, return_counts=True)

# train models with data from 1, 2, 3, ... neurons in each iteration
NumNeu = 1
NerLab = list(distinct_combinations(list(uniNerLab), NumNeu))

# NumNeu=len(uniNerLab)
# NerLab=[tuple(uniNerLab)]

Allconf_matrix_list = []
Allaccuracy_train = np.empty([len(NerLab), (len(models))])
Allaccuracy_test = np.empty([len(NerLab), (len(models))])
Allaverage_precision = np.empty([len(NerLab), (len(models))])
for i in range(len(NerLab)):
    ind = np.nonzero(np.isin(AllneuronLab, NerLab[i]))
    print('ALLsamples in neuron#' + str(NerLab[i]) + ':' + str(len(ind[0])))
    AllX = AllXX[ind[0]]
    Ally = Allyy[ind[0]]
    # balance number of samples across classes, equal to the num of samples from the smallest class
    clss, clssnum = np.unique(Ally, return_counts=True)
Example #7
def build_coupling_graph(in_dir):
    '''The "build_coupling_graph" function builds a corpus coupling graph G(N,E) where:
            - N is the set of nodes with two attributes: article ID and number of references
            - E is the set of edges. An edge links two articles if and only if:
                   (i)   they share at least "BCTHR" references
                   (ii)  each article has at least "NRTHR" references
                   (iii) their Kessler similarity w_ij is >= "WTHR"
              An edge has two attributes: number of shared references 
                                          and the Kessler similarity     
       The Kessler similarity is defined as: 
                       
                  # of common references between pub_id_i and pub_id_j
         w_ij = ---------------------------------------------------------
                sqrt(# references of pub_id_i * # references of pub_id_j)
                                 
       Args:
           in_dir (Path): folder path of the corpus parsed files generated 
                          by the BiblioParsingWos module or the BiblioParsingScopus module.
       
       Returns:
           G (networkx object): corpus coupling graph.
        
    '''

    # Standard library imports
    import math
    from collections import defaultdict
    from pathlib import Path

    # 3rd party imports
    import networkx as nx
    import pandas as pd
    from more_itertools import distinct_combinations

    # Local imports
    from .BiblioSpecificGlobals import COL_NAMES
    from .BiblioSpecificGlobals import COUPL_GLOBAL_VALUES
    from .BiblioSpecificGlobals import DIC_OUTDIR_PARSING

    BCTHR = COUPL_GLOBAL_VALUES['BCTHR']
    RTUTHR = COUPL_GLOBAL_VALUES['RTUTHR']
    WTHR = COUPL_GLOBAL_VALUES['WTHR']
    NRTHR = COUPL_GLOBAL_VALUES['NRTHR']

    pub_id_alias = COL_NAMES['articles'][0]
    author_alias = COL_NAMES['articles'][1]
    year_alias = COL_NAMES['articles'][2]
    journal_alias = COL_NAMES['articles'][3]
    author_ref_alias = COL_NAMES['references'][1]

    # The references and their ids are extracted from the file articles.dat (tsv format)
    # ---------------------------------------------------------------------------------------

    # TO DO: set columns by names
    usecols = [COL_NAMES['articles'][i] for i in [0, 1, 2, 3, 4, 5]]
    df_article = pd.read_csv(in_dir / Path(DIC_OUTDIR_PARSING['A']),
                             sep='\t',
                             usecols=usecols).fillna(0).astype(str)

    # Builds the article label: "pub_id, author, year, journal, volume, page"
    # ex: "6354, Name S., 2004, SCIENCE, 306, 496"
    table_art = df_article.apply(lambda row: ', '.join(row[1:]), axis=1)
    # Takes care of the unknown volume and/or page number
    # ex: "6354, Name S., 2010, THESIS"
    table_art = [x.replace(', 0', '') for x in table_art]
    df_article['label_article'] = table_art

    df_article[pub_id_alias] = pd.to_numeric(df_article[pub_id_alias])

    # 1- Creates the dict named ref_table = {ref A: [list of article id (pub_id) citing ref A]}
    # ex : ref_table = {'Bellouard Q, 2017, INT. J. HYDROG. ENERGY, 42, 13486': [0, 50],...}
    # 2- creates the dict named nR = {pub_id: number of references of pub_id}
    # ---------------------------------------------------------------------------------------

    usecols = [COL_NAMES['references'][i] for i in [0, 1, 2, 3, 4, 5]]
    df_reference = pd.read_csv(in_dir / Path(DIC_OUTDIR_PARSING['R']),
                               sep='\t',
                               usecols=usecols,
                               na_filter=False).astype(str)
    #df_reference.columns = ['pub_id','first_author','year','journal','volume','page']

    # Builds the reference label: "pub_id, author, year, journal, volume, page"
    # ex: "6354, Name S., 2004, SCIENCE, 306, 496"
    table = df_reference.apply(lambda row: ", ".join(row[1:]), axis=1)
    # Takes care of the unknown volume and/or page number
    # ex: "6354, Name S., 2010, THESIS"
    table = [x.replace(', 0', '') for x in table]
    df_reference['label_ref'] = table

    df_reference[pub_id_alias] = pd.to_numeric(df_reference[pub_id_alias])

    nR = df_reference.groupby(pub_id_alias).count().to_dict()[author_ref_alias]

    ref_table = {
        x[0]: x[1].tolist()
        for x in df_reference.groupby('label_ref')[pub_id_alias]
    }

    # Builds the dict of dicts BC_table such as:
    #   BC_table = {pub_id_i:{pub_id_j: number of common references of pub_id_i and pub_id_j,...},...}
    # ex : BC_table = {0: {50: 8, 55: 2, 121: 2, 10: 2},...}
    #   pub_id 0 has 8 common references with pub_id 50; pub_id 0 has 2 common references with pub_id 55
    #----------------------------------------------------------------------------------------------

    BC_table = {}
    for reference in ref_table:
        if len(ref_table[reference]) >= RTUTHR:  # The reference is cited at least RTUTHR times
            for pub_id_i, pub_id_j in distinct_combinations(
                    ref_table[reference], 2):
                if pub_id_i not in BC_table:
                    BC_table[pub_id_i] = dict()
                if pub_id_j not in BC_table[pub_id_i]:
                    BC_table[pub_id_i][pub_id_j] = 0
                BC_table[pub_id_i][pub_id_j] += 1

    # Builds the networkx graph object G with edge attributes:
    #    1-   the Kessler similarity  w_ij
    #    2-   the number of common references between pub_id_i and pub_id_j
    #----------------------------------------------------------------------------------------------

    G = nx.Graph()
    for pub_id_i in BC_table:
        for pub_id_j in BC_table[pub_id_i]:
            w_ij = (1.0 * BC_table[pub_id_i][pub_id_j]) \
                   / math.sqrt(nR[pub_id_i] * nR[pub_id_j]) # Kessler similarity
            if (BC_table[pub_id_i][pub_id_j] >= BCTHR  # Number of common references between id_i and id_j >= BCTHR
                    and nR[pub_id_i] >= NRTHR  # Number of references of id_i >= NRTHR (default=1)
                    and nR[pub_id_j] >= NRTHR  # Number of references of id_j >= NRTHR (default=1)
                    and w_ij >= WTHR):  # Kessler similarity >= WTHR (default=0)
                G.add_edge(pub_id_i,
                           pub_id_j,
                           weight=w_ij,
                           nc=BC_table[pub_id_i][pub_id_j])

    nx.set_node_attributes(G, nR, 'nbr_references')

    node_label = {
        x: df_article.loc[df_article[pub_id_alias] == x,
                          'label_article'].tolist()[0]
        for x in G.nodes
    }
    nx.set_node_attributes(G, node_label, 'label')

    node_first_author = {
        x: df_article.loc[df_article[pub_id_alias] == x,
                          author_alias].tolist()[0]
        for x in G.nodes
    }
    nx.set_node_attributes(G, node_first_author, 'first_author')

    node_year = {
        x: df_article.loc[df_article[pub_id_alias] == x,
                          year_alias].tolist()[0]
        for x in G.nodes
    }
    nx.set_node_attributes(G, node_year, 'year')

    node_journal = {
        x: df_article.loc[df_article[pub_id_alias] == x,
                          journal_alias].tolist()[0]
        for x in G.nodes
    }
    nx.set_node_attributes(G, node_journal, 'journal')

    return G
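
A toy illustration (made-up numbers) of the coupling logic above: distinct_combinations over the pub_ids citing a reference yields every pair of articles sharing it, and the Kessler similarity normalises the shared-reference count by the articles' reference counts.

import math
from more_itertools import distinct_combinations

ref_table = {"ref A": [0, 50], "ref B": [0, 50, 55]}  # reference -> citing pub_ids
nR = {0: 10, 50: 8, 55: 5}                            # pub_id -> number of references
shared = {}
for pub_ids in ref_table.values():
    for pub_id_i, pub_id_j in distinct_combinations(pub_ids, 2):
        shared[(pub_id_i, pub_id_j)] = shared.get((pub_id_i, pub_id_j), 0) + 1
for (i, j), n_common in shared.items():
    w_ij = n_common / math.sqrt(nR[i] * nR[j])
    print(i, j, n_common, round(w_ij, 3))  # e.g. 0 50 2 0.224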
Example #8
    def __init__(self, task_config, fft_config, ecei_config, storage_config):
        """Initialize the object with a fixed channel list, a fixed name of the analysis to be performed
        and a fixed set of parameters for the analysis routine.

        Inputs:
        =======
        task_config: dict, defines parameters of the analysis to be performed
        fft_config: dict, gives parameters of the fourier-transformed data
        ecei_config: dict, information on the ECEI diagnostic
        storage_config: dict, configuration of the storage backend
        """

        self.task_config = task_config
        self.ecei_config = ecei_config
        self.storage_config = storage_config
        self.logger = logging.getLogger("simple")


        # Stores the description of the task. This can be arbitrary
        self.description = task_config["task_description"]
        # Stores the name of the analysis we are going to execute
        self.analysis = task_config["analysis"]

        if self.analysis == "cross_phase":
            self.kernel = kernel_crossphase_64_cy
        elif self.analysis == "cross_power":
            self.kernel = kernel_crosspower_64_cy
        elif self.analysis == "cross_correlation":
            self.kernel = kernel_crosscorr
        elif self.analysis == "coherence":
            self.kernel = kernel_coherence_64_cy
        elif self.analysis == "skw":
            self.kernel = kernel_skw
        elif self.analysis == "bicoherence":
            self.kernel = kernel_bicoherence
        elif self.analysis == "null":
            self.kernel = kernel_null
        else:
            raise NameError(f"Unknown analysis task {self.analysis}")
        

        # Parse the reference and cross channels.
        self.ref_channels = channel_range.from_str(task_config["ref_channels"])
        # These channels serve as the cross-data for the spectral diagnostics
        self.cmp_channels = channel_range.from_str(task_config["cmp_channels"])

        # Construct a list of unique channels
        # F.ex. we have ref_channels [(1,1), (1,2), (1,3)] and cmp_channels = [(1,1), (1,2)]
        # The unique list of channels is then
        # (1,1) x (1,1), (1,1) x (1,2)
        # (1,2) x (1,2) !!! Omits (1,2) x (1,1)
        # (1,3) x (1,1)
        # (1,3) x (1,2)
        channel_pairs = [channel_pair(cr, cx) for cr in self.ref_channels for cx in self.cmp_channels]
        # Make a list, so that we don't exhaust the iterator after the first call.
        self.unique_channels = [i[0] for i in more_itertools.distinct_combinations(channel_pairs, 1)]
        # Number of channel pairs per future
        self.channel_chunk_size = task_config["channel_chunk_size"]
        # Total number of chunks, i.e. the number of futures appended to the list per call to calculate
        self.num_chunks = (len(self.unique_channels) + self.channel_chunk_size - 1) // self.channel_chunk_size

        # Get the configuration from task_fft_scipy, but don't store the object.
        fft_config["fsample"] = ecei_config["SampleRate"] * 1e3
        self.my_fft = task_fft_scipy(self.channel_chunk_size, fft_config, normalize=True, detrend=True)
        self.fft_params = self.my_fft.get_fft_params()

        self.storage_backend = None
        if self.storage_config["backend"] == "numpy":
            self.storage_backend = backends.backend_numpy(self.storage_config)
        elif self.storage_config["backend"] == "mongo":
            self.storage_backend = backends.backend_mongodb(self.storage_config)
        elif self.storage_config["backend"] == "null":
            self.storage_backend = backends.backend_null(self.storage_config)
        else:
            raise NameError(f"Unknown storage backend requested: {self.storage_config}")

        self.storage_backend.store_metadata(self.task_config, self.get_dispatch_sequence())
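
The num_chunks expression above is ceiling division on the number of unique channel pairs; a quick standalone check (sizes are arbitrary), using more_itertools.chunked to produce the corresponding chunks:

import more_itertools

unique_channels = list(range(10))  # arbitrary stand-in for the channel pair list
channel_chunk_size = 4
num_chunks = (len(unique_channels) + channel_chunk_size - 1) // channel_chunk_size
chunks = list(more_itertools.chunked(unique_channels, channel_chunk_size))
print(num_chunks, len(chunks))  # 3 3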
Example #9
File: dice.py Project: wrq/pyjunk
def sums_from_dice(dice):
  return set(map(sum, list(more.distinct_combinations(dice, 2))))
Example #10
File: dice.py Project: wrq/pyjunk
def pair_sums():
  for perm in diceperms:
    dcombs = more.distinct_combinations(perm, 2)
    yield (perm, list(map(sum, list(dcombs))))
Example #11
File: dice.py Project: wrq/pyjunk
  for _ in range(10_000_000):
    xsum = sum_n_dice(x)
    ysum = sum_n_dice(y)

    if xsum > ysum:
      xwins += 1
    if xsum < ysum:
      ywins += 1
    if xsum == ysum:
      pass

  print(f"xwins = {xwins} ywins = {ywins} ratio = {xwins/ywins}")

# dice_fight(2,3)

all_twos = list(more.distinct_combinations([1,2,3,4,5,6] * 2, 2))
all_threes = list(more.distinct_combinations([1,2,3,4,5,6] * 3, 3))

def dice_fight2():
  two_wins = 0
  three_wins = 0
  draws = 0
  for two in all_twos:
    for three in all_threes:
      if sum(two) > sum(three):
        two_wins += 1
      if sum(two) < sum(three):
        three_wins += 1
      if sum(two) == sum(three):
        draws += 1
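
A short check of what distinct_combinations contributes in these dice snippets (assuming `more` is more_itertools imported under that alias, as above): duplicating the faces lets doubles such as (3, 3) appear, while equal unordered rolls are still counted only once.

import more_itertools as more

two_dice = list(more.distinct_combinations([1, 2, 3, 4, 5, 6] * 2, 2))
print(len(two_dice))                    # 21 distinct unordered rolls of two dice
print(sorted(set(map(sum, two_dice))))  # [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]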
Example #12
def get_candidate_curves(s_id, intersections_3d, strokes_topology, camera,
                         bbox_diag):

    candidate_curves = []
    # for each candidate line, include at least the intersections which were used
    # to create the candidate_line
    creation_intersections = []
    intersection_normals = [[] for i in range(len(intersections_3d))]

    #print("s_id: ", s_id)
    #start_time = time.clock()
    for inter_id, inter in enumerate(intersections_3d):
        if inter.coords_3d is None:
            continue
        # include the three major planes
        for i in range(3):
            cand_curve = tools_3d.Curve3D()
            plane_point = inter.coords_3d
            plane_normal = np.zeros(3)
            plane_normal[i] = 1.0
            #geometry = camera.lift_polyline_to_plane(strokes_topology[s_id]["stroke"],
            #										 plane_point, plane_normal)
            #print("geometry comparison")
            #print(np.array(geometry))
            intersection_normals[inter_id].append(plane_normal)
            geometry = camera.lift_polyline_to_plane_vectorized(
                strokes_topology[s_id]["stroke"], plane_point, plane_normal)
            #print(geometry)
            #sys.exit()
            #if len(geometry) == 0:
            #	continue
            cand_curve.geometry = geometry
            cand_curve.plane_point = plane_point
            cand_curve.plane_normal = plane_normal
            candidate_curves.append(cand_curve)
            creation_intersections.append([inter_id])

        if strokes_topology[s_id]["is_ellipse"]:
            continue
        curr_s_id = np.argwhere(
            np.array(inter.stroke_ids) == s_id).flatten()[0]
        other_s = strokes_topology[inter.stroke_ids[1 - curr_s_id]]
        if other_s["primitive_type"] == 0:
            # add scaffold planes
            for plane in other_s["planes"]:
                plane_point = plane["plane_point"]
                plane_normal = plane["plane_normal"]
                if np.isclose(np.linalg.norm(plane_normal), 0.0):
                    continue

                used_normals = np.array(intersection_normals[inter_id])
                if len(used_normals) > 0:
                    used_dot = 1.0 - np.abs(np.dot(used_normals, plane_normal))
                    if np.any(used_dot < np.deg2rad(0.1) / np.pi):
                        continue
                intersection_normals[inter_id].append(plane_normal)

                geometry = camera.lift_polyline_to_plane_vectorized(
                    strokes_topology[s_id]["stroke"], plane_point,
                    plane_normal)
                if len(geometry) == 0:
                    continue
                cand_curve = tools_3d.Curve3D()
                cand_curve.geometry = geometry
                cand_curve.plane_point = plane_point
                cand_curve.plane_normal = plane_normal
                candidate_curves.append(cand_curve)
                creation_intersections.append([inter_id])

    #print("len(candidate_curves)")
    #print(len(candidate_curves))
    # finally, add planes formed by triplets of intersections
    for comb in distinct_combinations(range(len(intersections_3d)), 3):
        if strokes_topology[s_id]["is_ellipse"]:
            continue
        if intersections_3d[comb[0]].coords_3d is None or \
          intersections_3d[comb[1]].coords_3d is None or \
          intersections_3d[comb[2]].coords_3d is None:
            continue
        plane_point = np.array(intersections_3d[comb[0]].coords_3d)
        vec_1 = np.array(intersections_3d[comb[1]].coords_3d) - plane_point
        if np.isclose(np.linalg.norm(vec_1), 0.0):
            continue
        vec_1 /= np.linalg.norm(vec_1)
        vec_2 = np.array(intersections_3d[comb[2]].coords_3d) - plane_point
        if np.isclose(np.linalg.norm(vec_2), 0.0):
            continue
        vec_2 /= np.linalg.norm(vec_2)
        plane_normal = np.cross(vec_1, vec_2)
        if np.isclose(np.linalg.norm(plane_normal), 0.0):
            continue
        plane_normal /= np.linalg.norm(plane_normal)

        # check if similar normal already used by one of the 3 intersections
        normal_already_used = False
        for i in comb:
            used_normals = np.array(intersection_normals[i])
            if len(used_normals) > 0:
                used_dot = 1.0 - np.abs(np.dot(used_normals, plane_normal))
                if np.any(used_dot < np.deg2rad(0.1) / np.pi):
                    normal_already_used = True
                    break
        if normal_already_used:
            continue
        else:
            for i in comb:
                intersection_normals[i].append(plane_normal)

        geometry = camera.lift_polyline_to_plane_vectorized(
            strokes_topology[s_id]["stroke"], plane_point, plane_normal)
        if len(geometry) == 0:
            continue
        cand_curve = tools_3d.Curve3D()
        cand_curve.geometry = geometry
        cand_curve.plane_point = plane_point
        cand_curve.plane_normal = plane_normal
        candidate_curves.append(cand_curve)
        creation_intersections.append([comb[0], comb[1], comb[2]])

    #print(len(candidate_curves))
    cand_curve_bbox = [
        tools.bbox_3d_single_stroke(cand_curve.geometry)
        for cand_curve in candidate_curves
    ]
    #print("collect_planes time: " + str(
    #	(time.clock() - start_time) / 60.0) + " min")
    #for cand_curve in candidate_curves:
    #	if np.all(np.isclose(cand_curve.plane_normal, 0.0)):
    #		print("cand_curve.plane_normal")
    #		print(cand_curve.plane_normal)
    # get intersection sets for all candidate lines: all intersections which are
    # within 0.1*length(cand_line)
    #start_time = time.clock()
    intersection_sets = []
    empty_intersection_sets = []
    for cand_curve_id, cand_curve in enumerate(candidate_curves):
        intersection_set = creation_intersections[cand_curve_id]
        line_length = tools_3d.line_3d_length(cand_curve.geometry)
        merge_dist = min(0.02 * bbox_diag, 0.1 * line_length)
        #merge_dist = min(0.005 * bbox_diag, 0.05 * line_length)
        for inter_3d_id, inter_3d in enumerate(intersections_3d):
            if inter_3d_id in intersection_set or inter_3d.coords_3d is None:
                continue
            if tools_3d.distance_to_bbox(
                    inter_3d.coords_3d,
                    cand_curve_bbox[cand_curve_id]) > merge_dist:
                continue
            #dist_old = tools_3d.distance_point_to_polyline(inter_3d.coords_3d,
            #										   cand_curve.geometry)

            dist = tools_3d.distance_point_to_polyline_vectorized(
                inter_3d.coords_3d, cand_curve.geometry)
            if np.isclose(dist, -1.0):
                continue
            #dist = tools_3d.distance_point_to_polyline(inter_3d.coords_3d,
            #										   cand_curve.geometry)
            #sys.exit()

            if dist < merge_dist:
                #if dist < 0.1 * line_length:
                #if dist < 0.02 * bbox_diag and dist < 0.1 * line_length:
                intersection_set.append(inter_3d_id)
        if len(intersection_set) > 0:
            intersection_sets.append(intersection_set)
        else:
            empty_intersection_sets.append(cand_curve_id)
    # remove empty candidate lines
    for del_id in sorted(empty_intersection_sets, reverse=True):
        del candidate_curves[del_id]

    #print("collect_intersections time: " + str(
    #	(time.clock() - start_time) / 60.0) + " min")

    #start_time = time.clock()
    clustered_candidate_curves, clustered_intersection_sets = \
     cluster_candidate_curves_v2(candidate_curves, intersection_sets, dep_node_id=s_id,
            intersections_3d=intersections_3d)
    #print("clustering time: " + str(
    #	(time.clock() - start_time) / 60.0) + " min")

    #for cand_curve in clustered_candidate_curves:
    #	if np.all(np.isclose(cand_curve.plane_normal, 0.0)):
    #		print("clustered_cand_curve.plane_normal")
    #		print(cand_curve.plane_normal)
    return clustered_candidate_curves, clustered_intersection_sets
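
Since the indices in range(len(intersections_3d)) are all distinct, distinct_combinations here produces the same triplets as itertools.combinations; a minimal sketch of the enumeration used to propose planes:

from itertools import combinations
from more_itertools import distinct_combinations

triplets = list(distinct_combinations(range(4), 3))
assert set(triplets) == set(combinations(range(4), 3))
print(sorted(triplets))
# [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]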
Example #13
    def update_stroke_score_function(self, dep_node_id, sketch, extreme_intersection_distance):
        for sketch_version in self.sketch_versions_reference.keys():
            cand_id, inter_set, score_container = self.sketch_versions_reference[sketch_version][dep_node_id]

            # line coverage
            arc_params = [inter.mid_inter_param[np.argwhere(inter.stroke_ids == self.dependency_nodes[dep_node_id].stroke_id).flatten()[0]]
                          for inter in inter_set]
            inter_dist = np.max(arc_params) - np.min(arc_params)
            line_coverage = 0.0
            if extreme_intersection_distance > 0.0:
                line_coverage = inter_dist/extreme_intersection_distance
            self.sketch_versions_reference[sketch_version][dep_node_id][2].line_coverage = line_coverage

            # axis_alignment
            axis_alignment = self.dependency_nodes[dep_node_id].candidate_nodes[cand_id].axis_alignment
            self.sketch_versions_reference[sketch_version][dep_node_id][2].axis_alignment = axis_alignment

            geom_score = axis_alignment
            if self.dependency_nodes[dep_node_id].axis_label == 3:
                # compute ortho_score
                ortho_score = 0.0
                tangentiality_score = 0.0
                planarity_score = 0.0

                for dep_inter in inter_set:
                    adj_inters = sketch.intersection_graph.get_adjacent_intersections(dep_inter.inter_id)
                    other_dep_node_ids = []
                    for adj_inter in adj_inters:
                        for i in adj_inter.stroke_ids:
                            if i != self.dependency_nodes[dep_node_id].stroke_id and \
                                    self.stroke_id_to_dep_node_id[i] < len(self.sketch_versions_reference[sketch_version]):
                                # check if this intersection has been used by other stroke
                                inter_ids_used = [inter.inter_id for inter in self.sketch_versions_reference[sketch_version][self.stroke_id_to_dep_node_id[i]][1]]
                                if adj_inter.inter_id in inter_ids_used:
                                    other_dep_node_ids.append(self.stroke_id_to_dep_node_id[i])
                    other_dep_node_ids = np.unique(other_dep_node_ids)
                    if len(other_dep_node_ids) == 0:
                        continue

                    other_cand_node_ids = [self.sketch_versions_reference[sketch_version][other_dep_node_id][0]
                                           for other_dep_node_id in other_dep_node_ids]

                    local_ortho_scores = [tools_3d.compute_gaussian_between_lines(
                        self.dependency_nodes[dep_node_id].candidate_nodes[cand_id].geometry,
                        self.dependency_nodes[other_dep_node_id].candidate_nodes[other_cand_id].geometry)
                        for (other_dep_node_id, other_cand_id) in zip(other_dep_node_ids, other_cand_node_ids)]
                    local_tangentiality_scores = [tools_3d.compute_gaussian_between_one_minus_lines(
                        self.dependency_nodes[dep_node_id].candidate_nodes[cand_id].geometry,
                        self.dependency_nodes[other_dep_node_id].candidate_nodes[other_cand_id].geometry)
                        for (other_dep_node_id, other_cand_id) in zip(other_dep_node_ids, other_cand_node_ids)]
                    if len(other_dep_node_ids) > 1:
                        local_planarity_scores = []
                        for comb in distinct_combinations(range(len(other_dep_node_ids)), 2):
                            plane_vec_1 = self.dependency_nodes[other_dep_node_ids[comb[0]]].candidate_nodes[other_cand_node_ids[comb[0]]].geometry
                            plane_vec_2 = self.dependency_nodes[other_dep_node_ids[comb[1]]].candidate_nodes[other_cand_node_ids[comb[1]]].geometry
                            local_planarity_scores.append(tools_3d.compute_planarity_score(
                                self.dependency_nodes[dep_node_id].candidate_nodes[cand_id].geometry,
                                plane_vec_1, plane_vec_2))
                        planarity_score = max(planarity_score, np.max(local_planarity_scores))
                    ortho_score = max(ortho_score, np.max(local_ortho_scores))
                    tangentiality_score = max(tangentiality_score, np.max(local_tangentiality_scores))

                geom_score = max(max(ortho_score, tangentiality_score), planarity_score)
                self.sketch_versions_reference[sketch_version][dep_node_id][2].orthogonality = ortho_score
                self.sketch_versions_reference[sketch_version][dep_node_id][2].tangentiality = tangentiality_score
                self.sketch_versions_reference[sketch_version][dep_node_id][2].planarity = planarity_score

            total_score = 0.4*line_coverage + 0.4*geom_score + 0.2*line_coverage*geom_score

            self.sketch_versions_reference[sketch_version][dep_node_id][-1].total_score = total_score
Example #14
             ) as df:
    fft_data = df["fft_data"]

fft_data_64 = np.ascontiguousarray(fft_data)
fft_data_32 = np.require(fft_data_64,
                         dtype=np.complex64,
                         requirements=['A', 'C'])

###################################################
# Generate channels to iterate over

ref_chrg = channel_range.from_str("L0101-2408")
cmp_chrg = channel_range.from_str("L0101-2408")
channel_pairs = [channel_pair(cr, cx) for cr in ref_chrg for cx in cmp_chrg]
unique_channels = [
    i[0] for i in more_itertools.distinct_combinations(channel_pairs, 1)
]

#ch_it = [channel_pair(channel("L", i, 1), channel("L", i, 2)) for i in range(1, 25)]
#ch_it = ch_it + [channel_pair(channel("L", i, 1), channel("L", i, 3)) for i in range(1, 25)]
#h_it = ch_it + [channel_pair(channel("L", i, 1), channel("L", i, 4)) for i in range(1, 25)]
#ch_it = ch_it + [channel_pair(channel("L", i, 1), channel("L", i, 4)) for i in range(1, 25)]
#ch_it = ch_it + [channel_pair(channel("L", i, 1), channel("L", i, 5)) for i in range(1, 25)]
#ch_it = ch_it + [channel_pair(channel("L", i, 1), channel("L", i, 6)) for i in range(1, 25)]
#ch_it = ch_it + [channel_pair(channel("L", i, 1), channel("L", i, 7)) for i in range(1, 25)]
#ch_it = ch_it + [channel_pair(channel("L", i, 1), channel("L", i, 8)) for i in range(1, 25)]

ch1_idx_arr = np.array([int(ch_pair.ch1.idx()) for ch_pair in unique_channels],
                       dtype=np.uint64)
ch2_idx_arr = np.array([int(ch_pair.ch2.idx()) for ch_pair in unique_channels],
                       dtype=np.uint64)
Example #15
# Encoding: UTF-8
"""
Find out how to get a channel list from get_dispatch_sequence
and pickle it
"""

import sys
sys.path.append("/global/homes/r/rkube/repos/delta")
import numpy as np
import more_itertools
from itertools import chain

from analysis.channels import channel, channel_range, channel_pair

ref_channels = channel_range.from_str("L0101-2408")
cmp_channels = channel_range.from_str("L0101-2408")
channel_pairs = [
    channel_pair(ref, cmp) for ref in ref_channels for cmp in cmp_channels
]
unique_channels = list(more_itertools.distinct_combinations(channel_pairs, 1))

print("Number of channel pairs: {0:d}".format(len(channel_pairs)))
print("Unique channel paits: {0:d}".format(len(unique_channels)))

ch_list = [u[0] for u in unique_channels]

np.savez("dispatch_seq.npz", ch_list=ch_list)

# End of file test_channel_it.py