Example no. 1
    def draw_axis(self, axis, color):
        ticks = self._p._axis_ticks[axis]
        radius = self._p._tick_length / 2.0
        if len(ticks) < 2:
            return

        # calculate the vector for this axis
        axis_lines = [[0, 0, 0], [0, 0, 0]]
        axis_lines[0][axis], axis_lines[1][axis] = ticks[0], ticks[-1]
        axis_vector = vec_sub(axis_lines[1], axis_lines[0])

        # calculate angle to the z direction vector
        pos_z = get_direction_vectors()[2]
        d = abs(dot_product(axis_vector, pos_z))
        d = d / vec_mag(axis_vector)

        # don't draw labels if we're looking down the axis
        labels_visible = abs(d - 1.0) > 0.02

        # draw the ticks and labels
        for tick in ticks:
            self.draw_tick_line(axis, color, radius, tick, labels_visible)

        # draw the axis line and labels
        self.draw_axis_line(axis, color, ticks[0], ticks[-1], labels_visible)
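Example 1 calls several small vector helpers that are never shown. A minimal sketch, assuming plain 3-element Python lists; get_direction_vectors is renderer/camera state and is omitted here:

from math import sqrt

def vec_sub(a, b):
    # component-wise difference a - b
    return [ai - bi for ai, bi in zip(a, b)]

def dot_product(a, b):
    # sum of component-wise products
    return sum(ai * bi for ai, bi in zip(a, b))

def vec_mag(a):
    # Euclidean length of a vector
    return sqrt(dot_product(a, a))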
Example no. 3
 def calculate_deltas_for_hidden_layer(self, next_layer):
     for index, neuron in enumerate(self.neurons):
         next_weights = [n.weights[index] for n in next_layer.neurons]
         next_deltas = [n.delta for n in next_layer.neurons]
         sum_weights_and_deltas = dot_product(next_weights, next_deltas)
         neuron.delta = neuron.derivative_activation_function(
             neuron.output_cache) * sum_weights_and_deltas
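This is the standard backpropagation rule for hidden layers: a neuron's delta is its activation derivative at the cached pre-activation, times the dot product of the next layer's incoming weights and deltas. A tiny numeric check, assuming a sigmoid activation (the source doesn't fix one) and the dot_product sketch above:

from math import exp

def sigmoid(z):
    return 1.0 / (1.0 + exp(-z))

# two next-layer neurons: the weights from this neuron to them, and their deltas
next_weights = [0.5, -0.25]
next_deltas = [0.1, 0.4]
error = dot_product(next_weights, next_deltas)  # 0.5*0.1 + (-0.25)*0.4 = -0.05

z = 0.7                          # this neuron's cached pre-activation
s = sigmoid(z)
delta = s * (1 - s) * error      # sigmoid'(z) = s*(1 - s)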
Example no. 4
 def calculate_deltas_for_hidden_layer(self, next_layer: Layer) -> None:
     for index, neuron in enumerate(self.neurons):
         next_weights: List[float] = \
             [n.weights[index] for n in next_layer.neurons]
         next_deltas: List[float] = [n.delta for n in next_layer.neurons]
         sum_weights_and_deltas: float = dot_product(
             next_weights, next_deltas)
         neuron.delta = \
             neuron.derivative_activation_function(neuron.output_cache) \
             * sum_weights_and_deltas
Example no. 5
    def predict(self, feature):
       """
       @param feature: feature vector for the test example
       returns +1/-1 (diseased/healthy), which is the sign of the score.
       """

       score = util.dot_product(self.weights, feature)
       if score > 0:
           return 1
       return -1
Example no. 6
    def hidden_layer_deltas(self, next_layer: Layer) -> None:
        for idx, neuron in enumerate(self.neurons):
            next_weights: List[float] = [
                n.weights[idx] for n in next_layer.neurons
            ]
            next_deltas: List[float] = [n.delta for n in next_layer.neurons]
            sum_weights_and_deltas: float = dot_product(
                next_weights, next_deltas)

            _dev: float = neuron.derivative_function(neuron.output_cache)
            neuron.delta: float = _dev * sum_weights_and_deltas
Example no. 7
 def predict(self, feature):
     """
     @param feature: feature vector for the test example;
     returns +1/-1 based on the sign of the score,
     +1 -> diseased test case and vice versa.
     """
     score = util.dot_product(self.weights, feature)
     if score > 0:
         self.predictDiseased += 1
         # print("score is > 0 yay!")
         return 1
     return -1
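Both predict variants implement the same decision rule, sign(w . x). A self-contained sketch (the class scaffold and numbers are made up for illustration):

class LinearClassifier:
    def __init__(self, weights):
        self.weights = weights

    def predict(self, feature):
        # the sign of the score w . x decides the class
        score = sum(w * f for w, f in zip(self.weights, feature))
        return 1 if score > 0 else -1

clf = LinearClassifier([0.2, -0.5, 1.0])
print(clf.predict([1.0, 1.0, 1.0]))  # 0.2 - 0.5 + 1.0 = 0.7 > 0, so 1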
Example no. 8
 def calculate_deltas_for_hidden_layer(self, next_layer: Layer) -> None:
     for index, neuron in enumerate(self.neurons):
         # manipulate the delta inside the neuron
         # derivative * output error
         next_weights: List[float] = [
             n.weights[index] for n in next_layer.neurons
         ]
         next_deltas: List[float] = [n.delta for n in next_layer.neurons]
         sum_weights_and_deltas: float = dot_product(
             next_weights, next_deltas)
         neuron.delta = (
             neuron.derivative_activation_function(neuron.output_cache) *
             sum_weights_and_deltas)
Example no. 9
respect = True

# layers = [K**0,K**1,K**2]
layers = [1, 2, 2]
# layers = [1,1,1]

Data = gram.HierarchicalData(layers, fan_out=K, respect_hierarchy=respect)

ll = Data.labels(Data.terminals)
labs = np.where(np.isnan(ll), np.nanmax(ll) + 1, ll)

Ky_all = np.sign((ll[:, :, None] - 0.5) * (ll[:, None, :] - 0.5))
Ky_all = torch.tensor(np.where(np.isnan(Ky_all), 0, Ky_all))

reps = Data.represent_labels(Data.terminals)
Ky = util.dot_product(reps, reps)

plt.figure()
plt.subplot(131)
pos = graphviz_layout(Data.variable_tree, prog="twopi")
nx.draw(Data.variable_tree,
        pos,
        node_color=np.array(Data.variable_tree.nodes).astype(int),
        cmap='nipy_spectral')
dicplt.square_axis()
plt.subplot(132)
plt.imshow(ll, 'bwr')
plt.subplot(133)
plt.imshow(util.dot_product(reps, reps), 'binary')

#%%
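Here util.dot_product is applied to whole representation matrices, so it evidently returns a Gram (pairwise inner-product) matrix rather than a scalar. A minimal NumPy equivalent, assuming columns index items (the actual util.dot_product is not shown):

import numpy as np

def gram(a, b):
    # G[i, j] = <a[:, i], b[:, j]>
    return a.T @ b

reps = np.eye(3)[:, [0, 1, 1, 2]]  # one-hot codes for four items
print(gram(reps, reps))            # identical items have inner product 1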
Example no. 10
 def output(self, inputs):
     self.output_cache = dot_product(inputs, self.weights)
     return self.activation_function(self.output_cache)
Example no. 11
layers = [1, 2, 2]
# layers = [1,1,1]

Data = gram.HierarchicalData(layers,
                             fan_out=K,
                             respect_hierarchy=respect,
                             graph_rule='minimal')

ll = Data.labels(Data.terminals)
labs = np.where(np.isnan(ll), np.nanmax(ll) + 1, ll)

Ky_all = np.sign((ll[:, :, None] - 0.5) * (ll[:, None, :] - 0.5))
Ky_all = torch.tensor(np.where(np.isnan(Ky_all), 0, Ky_all))

reps = Data.represent_labels(Data.terminals)
Ky = util.dot_product(reps, reps)

plt.figure()
plt.subplot(131)
pos = graphviz_layout(Data.value_tree, prog="twopi")
nx.draw(Data.value_tree,
        pos,
        node_color=np.array(Data.value_tree.nodes(data='var'))[:, 1],
        cmap='nipy_spectral')
dicplt.square_axis()
plt.subplot(132)
plt.imshow(ll, 'bwr')
plt.subplot(133)
plt.imshow(util.dot_product(reps, reps), 'binary')

#%%
Example no. 12
y_ = np.stack([
    targets[inp_condition == i, :].mean(0).detach().numpy()
    for i in np.unique(inp_condition)
]).T
z_ = np.stack(
    [z[inp_condition == i, :].mean(0) for i in np.unique(inp_condition)]).T

dx = la.norm(x_[:, :, None] - x_[:, None, :], axis=0) / 2
dy = la.norm(y_[:, :, None] - y_[:, None, :], axis=0)
dz = la.norm(z_[:, :, None] - z_[:, None, :], axis=0)

# Kx = np.einsum('i...k,j...k->ij...', x_.T-x_.mean(1,keepdims=True).T, x_.T-x_.mean(1,keepdims=True).T)
# Ky = np.einsum('i...k,j...k->ij...', y_.T-y_.mean(1,keepdims=True).T, y_.T-y_.mean(1,keepdims=True).T)
# Kz = np.einsum('i...k,j...k->ij...', z_.T-z_.mean(1,keepdims=True).T, z_.T-z_.mean(1,keepdims=True).T)

Kx = util.dot_product(x_ - x_.mean(1, keepdims=True),
                      x_ - x_.mean(1, keepdims=True))
Ky = util.dot_product(y_ - y_.mean(1, keepdims=True),
                      y_ - y_.mean(1, keepdims=True))
Kz = util.dot_product(z_ - z_.mean(1, keepdims=True),
                      z_ - z_.mean(1, keepdims=True))

inp_align = np.sum(Kz * Kx) / np.sqrt(np.sum(Kx * Kx) * np.sum(Kz * Kz))
out_align = np.sum(Kz * Ky) / np.sqrt(np.sum(Ky * Ky) * np.sum(Kz * Kz))

apply_correction = False
# apply_correction = True

c_xy = np.sum(Ky * Kx) / np.sqrt(np.sum(Kx * Kx) * np.sum(Ky * Ky))

if apply_correction:
    cos_foo = np.linspace(0, 1, 1000)
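inp_align and out_align are cosine similarities between centered Gram matrices, i.e. a linear CKA-style alignment score in [-1, 1]. The arithmetic above, factored into a small helper (a sketch, not the author's API):

import numpy as np

def kernel_alignment(Ka, Kb):
    # cosine similarity between two (already centered) Gram matrices
    return np.sum(Ka * Kb) / np.sqrt(np.sum(Ka * Ka) * np.sum(Kb * Kb))

# e.g. inp_align == kernel_alignment(Kz, Kx)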
Example no. 13
def eval_snake(state):
    '''
    Linear combination of all squares' values. 
    Inspired by Hadi Pouransari & Saman Ghili's "AI algorithms for the game 2048".
    '''
    return util.dot_product(SNAKE_WEIGHTS, util.unroll_board(state.board))
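Neither SNAKE_WEIGHTS nor util.unroll_board appears in the snippet. A plausible sketch, assuming a 4x4 2048 board stored as a list of rows; the snake-shaped decay follows the cited paper's idea, but these particular weights are made up:

# weights decay along a snake-shaped path over the board,
# rewarding positions whose largest tiles hug one corner
SNAKE_WEIGHTS = [15, 14, 13, 12,
                  8,  9, 10, 11,
                  7,  6,  5,  4,
                  0,  1,  2,  3]

def unroll_board(board):
    # flatten a list-of-rows board into one list, matching SNAKE_WEIGHTS
    return [cell for row in board for cell in row]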
Example no. 14
 def output(self, inputs: List[float]) -> float:
     self.output_cache = dot_product(inputs, self.weights)
     return self.activation_function(self.output_cache)
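output computes and caches the neuron's pre-activation, then applies the activation function; the cache is what derivative_activation_function reads during backpropagation in Examples 3-8. A minimal class scaffold this method could live in (the layout is assumed, not taken from the source):

from typing import Callable, List

class Neuron:
    def __init__(self, weights: List[float],
                 activation_function: Callable[[float], float]) -> None:
        self.weights = weights
        self.activation_function = activation_function
        self.output_cache: float = 0.0

    def output(self, inputs: List[float]) -> float:
        self.output_cache = dot_product(inputs, self.weights)
        return self.activation_function(self.output_cache)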
Example no. 15
]).T
y_ = np.stack([
    targets[inp_condition == i, :].mean(0).detach().numpy()
    for i in np.unique(conds)
]).T
z_ = np.stack([z[inp_condition == i, :].mean(0) for i in np.unique(conds)]).T

dx = la.norm(x_[:, :, None] - x_[:, None, :], axis=0) / 2
dy = la.norm(y_[:, :, None] - y_[:, None, :], axis=0)
dz = la.norm(z_[:, :, None] - z_[:, None, :], axis=0)

# Kx = np.einsum('i...k,j...k->ij...', x_.T-x_.mean(1,keepdims=True).T, x_.T-x_.mean(1,keepdims=True).T)
# Ky = np.einsum('i...k,j...k->ij...', y_.T-y_.mean(1,keepdims=True).T, y_.T-y_.mean(1,keepdims=True).T)
# Kz = np.einsum('i...k,j...k->ij...', z_.T-z_.mean(1,keepdims=True).T, z_.T-z_.mean(1,keepdims=True).T)

Kx = util.dot_product(x_ - x_.mean(1, keepdims=True),
                      x_ - x_.mean(1, keepdims=True))
Ky = util.dot_product(y_ - y_.mean(1, keepdims=True),
                      y_ - y_.mean(1, keepdims=True))
Kz = util.dot_product(z_ - z_.mean(1, keepdims=True),
                      z_ - z_.mean(1, keepdims=True))

#%%
x_ = np.stack([
    inputs[inp_condition == i, :].mean(0).detach().numpy()
    for i in np.unique(conds)
]).T
y_ = np.stack([
    targets[inp_condition == i, :].mean(0).detach().numpy()
    for i in np.unique(conds)
]).T
# x_ = inputs.detach().numpy().T
Example no. 16
        z = rep(this_exp.train_data[0]).detach().numpy()
        # z = this_exp.train_data[0].detach().numpy()
        # z = linreg.predict(this_exp.train_data[0])@W1.T
        n_compute = np.min([5000, z.shape[0]])
        
        idx = np.random.choice(z.shape[0], n_compute, replace=False)
        # idx_tst = idx[::4] # save 1/4 for test set
        # idx_trn = np.setdiff1d(idx, idx_tst)
        
        cond = this_exp.train_conditions[idx]
        # cond = util.decimal(this_exp.train_data[1][idx,...])
        num_cond = len(np.unique(cond))
        
        z_ = np.stack([z[this_exp.train_conditions == i, :].mean(0)
                       for i in np.unique(cond)]).T

        layer_kern.append(util.dot_product(z_ - z_.mean(1, keepdims=True),
                                           z_ - z_.mean(1, keepdims=True)))
        layer_dist.append(la.norm(z_[:, :, None] - z_[:, None, :], axis=0))
        
        # xor = np.where(~(np.isin(range(num_cond), args['dichotomies'][0])^np.isin(range(num_cond), args['dichotomies'][1])))[0]
        ## Loop over dichotomies
        # D = assistants.Dichotomies(num_cond, args['dichotomies']+[xor], extra=50)
        
        if not skip_dichs:
            # choose dichotomies to have a particular order
            task_dics = []
            for d in task.positives:
                if 0 in d:
                    task_dics.append(d)
                else:
                    task_dics.append(list(np.setdiff1d(range(num_cond), d)))
            # if num_cond>8:
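The pattern z_ - z_.mean(1, keepdims=True) followed by util.dot_product recurs in Examples 12, 15 and 16: average the representations within each condition, center across conditions, then take the Gram matrix. A tiny helper capturing that, under the same columns-index-conditions assumption as above:

import numpy as np

def centered_gram(z_):
    # z_: (features, conditions); center over conditions, then
    # take inner products between condition columns
    zc = z_ - z_.mean(1, keepdims=True)
    return zc.T @ zc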
Example no. 17
# convert it into a feature-generator
g_ = nx.DiGraph()
_ = [[[g_.add_edge(e[0] + a*1j, e[1] + b*1j) for b in range(K)]
      for a, e in enumerate(g.out_edges(n))]
     for n in g.nodes]

roots = [node for node in g_.nodes()
         if g_.out_degree(node) != 0 and g_.in_degree(node) == 0]
leaves = [node for node in g_.nodes()
          if g_.in_degree(node) != 0 and g_.out_degree(node) == 0]

_ = [g_.add_edge(0, n) for n in roots]

data = [np.array(list(nx.algorithms.simple_paths.all_simple_paths(g_, 0, n))).squeeze()
        for n in leaves]

idx = np.concatenate([d.real.astype(int)[1:] for d in data], -1)
val = np.concatenate([d.imag.astype(int)[1:] for d in data], -1)
var = np.concatenate([np.ones(d.shape[-1] - 1, dtype=int)*i
                      for i, d in enumerate(data)], -1)

labels = np.zeros((idx.max(), var.max() + 1))*np.nan
labels[idx - 1, var] = val
reps = np.concatenate([np.where(np.isnan(l), 0,
                                np.eye(K)[:, np.where(np.isnan(l), 0, l).astype(int)])
                       for l in labels])

plt.figure()
plt.subplot(131)
pos = graphviz_layout(g, prog="twopi")
nx.draw(g, pos, node_color=np.array(g.nodes).astype(int), cmap='nipy_spectral')
dicplt.square_axis()
plt.subplot(132)
plt.imshow(labels)
plt.subplot(133)
plt.imshow(util.dot_product(reps, reps))


Example no. 18
 def calculate_margin(self, feature, weight, training_label):
    score = util.dot_product(weight, feature)
    return training_label * score
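calculate_margin returns the functional margin y(w . x): positive when the example lies on the correct side of the hyperplane, with magnitude indicating confidence. A quick numeric check (numbers made up):

weight = [0.2, -0.5]
feature = [1.0, 1.0]
training_label = -1
score = sum(w * f for w, f in zip(weight, feature))  # -0.3
print(training_label * score)  # 0.3 > 0: correctly classified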
Example no. 19
pn2 = np.array(pn2)
w_nrm = np.array(w_nrm)

dem = np.where(pn1 > 0, pn1, 1)*np.where(pn2 > 0, pn2, 1)

#%%
# this_nonlin = RayLou()
# this_nonlin = TanAytch()
# this_nonlin = NoisyTanAytch(noise)
# this_nonlin = HardTanAytch()
this_nonlin = Iden()
# this_nonlin = Poftslus(1)
# this_nonlin = NoisyRayLou(1.0)
x_ = input_task(np.unique(inp_condition), noise=0).detach().numpy().T

bwa = (np.random.rand(200, 10000) - 0.5)*0.2

nya = this_nonlin.deriv(torch.tensor(bwa.T@x_))
nyanya = nya[:, :, None]*nya[:, None, :]

K = (nyanya*util.dot_product(x_, x_)[None, :, :]).mean(0)

l, v = la.eig(K)

idx = np.argsort(-l)
vecs = v[:, idx].T@x_.T

Example no. 20
 def calculate_margin(self, feature, weights, training_label):
    score = util.dot_product(weights, feature)
    normal_margin = training_label * score * 1.0
    return normal_margin
Example no. 21
 def dotProduct(self, a, b):
     return util.dot_product(a, b)