Example #1
 def write_passive(self,
                   key,
                   value,
                   eta=0.01,
                   num_epochs=1000,
                   verbose=0,
                   term=0.5,
                   batch=True):
     # stops if each actual is within term of target
     cheat_keys = list(self.kv_cheat.keys())  # list() so the hashed keys can be indexed below
     cheat_error_history = np.nan * np.ones(
         (len(self.kv_cheat), num_epochs))
     write_error_history = np.nan * np.ones((num_epochs, ))
     history_fun = lambda e: np.fabs(e).max()
     # history_fun = lambda e: (e**2).sum()
     # train k -> v with gradient descent, incorporating passive loop
     current_key = key
     dW = {k: np.zeros(self.W[k].shape) for k in self.W}
     for epoch in range(num_epochs):
         if (current_key == key).all():
             if batch:
                 # batch W update
                 for k in self.W:
                     self.W[k] += dW[k]
                     dW[k] *= 0
             x = mu.forward_pass(key, self.W, self.f)
             e = x[self.L] - value
             # early termination:
             if (np.fabs(e) < term).all(): break
             write_error_history[epoch] = history_fun(e)
             E = 0.5 * (e**2).sum()
         else:
             x = mu.forward_pass(current_key, self.W, self.f)
             e = x[self.L] - np.sign(x[self.L])
         # Progress update:
         if epoch % int(num_epochs / 10) == 0:
             if verbose > 0: print('%d: %f' % (epoch, E))
         # Weight update:
         y = mu.backward_pass(x, e, self.W, self.df)
         G = mu.error_gradient(x, y)
         for k in self.W:
             if batch:
                 dW[k] += -eta * G[k]  # batch
             else:
                 self.W[k] += -eta * G[k]  # stochastic
         # Track write/any catastrophic forgetting
         for ck in range(len(cheat_keys)):
             x_ck = mu.forward_pass(mu.unhash_pattern(cheat_keys[ck]),
                                    self.W, self.f)
             e_ck = x_ck[self.L] - self.kv_cheat[cheat_keys[ck]]
             cheat_error_history[ck, epoch] = history_fun(e_ck)
         # update current key
         if (current_key == self.last_key).all():
             current_key = self.first()
         else:
             current_key = self.next(current_key)
     # update cheats
     self.write_error_history = write_error_history[:epoch]
     self.cheat_error_history = cheat_error_history[:, :epoch]
     self.kv_cheat[mu.hash_pattern(key)] = value
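
The examples on this page assume import numpy as np and a helper module mu that provides the forward/backward-pass routines, gradient computation, and pattern hashing used above. The sketch below shows what mu.forward_pass, mu.backward_pass, and mu.error_gradient are assumed to compute for a tanh network trained on E = 0.5 * ||x[L] - value||**2; the bodies are illustrative assumptions, not the actual mu implementation.

import numpy as np

# Assumed semantics of the mu helpers (illustrative sketch only):
# forward_pass returns a dict of layer activations x[0..L], backward_pass
# returns the backpropagated deltas, and error_gradient returns dE/dW[k]
# for the squared error E = 0.5 * ||x[L] - target||**2.
def forward_pass(x0, W, f):
    x = {0: x0}
    for k in sorted(W):                      # W[k] maps layer k to layer k + 1
        x[k + 1] = f(W[k].dot(x[k]))
    return x

def backward_pass(x, e, W, df):
    L = max(W) + 1
    y = {L: e * df(x[L])}                    # delta at the output layer
    for k in sorted(W, reverse=True):
        y[k] = W[k].T.dot(y[k + 1]) * df(x[k])
    return y

def error_gradient(x, y):
    return {k: y[k + 1].dot(x[k].T) for k in range(len(x) - 1)}

# One gradient-descent step on a single bipolar key -> value association:
rng = np.random.RandomState(0)
N, eta = 8, 0.01
W = {0: rng.randn(N, N) / np.sqrt(N), 1: rng.randn(N, N) / np.sqrt(N)}
f, df = np.tanh, lambda a: 1.0 - a**2        # tanh and its derivative (in terms of the activation)
key, value = np.sign(rng.randn(N, 1)), np.sign(rng.randn(N, 1))
x = forward_pass(key, W, f)
e = x[2] - value                             # x[2] plays the role of x[self.L]
G = error_gradient(x, backward_pass(x, e, W, df))
for k in W:
    W[k] += -eta * G[k]
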
Example #2
 def passive_ticks(self, num_ticks, eta=0.01, batch=True, verbose=0):
     for t in range(num_ticks):
         x = mu.forward_pass(self.current_key, self.W_kv, self.f_kv)
         value = np.sign(x[self.L_kv])  # self-generated rehearsal target
         e = x[self.L_kv] - value
         # Weight update:
         y = mu.backward_pass(x, e, self.W_kv, self.df_kv)
         G = mu.error_gradient(x, y)
         for k in self.W_kv:
             if batch:
                 self.dW_kv[k] += -eta * G[k]
                 if (self.current_key == self.last_key).all():
                     self.W_kv[k] += self.dW_kv[k]
                     self.dW_kv[k] *= 0
             else:
                 self.W_kv[k] += -eta * G[k]
         # Progress update:
         if verbose > 0:
             print('passive tick %d:  k->v %s (|>| %f)' %
                   (t,
                    mu.patterns_to_ints(
                        np.concatenate((self.current_key, value), axis=1)),
                    np.fabs(x[self.L_kv]).min()))
         # passive advance
         if (self.current_key == self.last_key).all():
             self.current_key = self.first()
         else:
             self.current_key = self.next(self.current_key)
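
A self-contained sketch of the rehearsal pattern in passive_ticks, under simplifying assumptions (a one-layer tanh map W and an explicit list of keys, both made up here in place of the original class attributes): the network's own thresholded output serves as the rehearsal target, gradients are accumulated into dW across the key cycle, and the batch is applied once the last key has been visited.

import numpy as np

rng = np.random.RandomState(0)
N, eta = 6, 0.01
W = rng.randn(N, N) / np.sqrt(N)
dW = np.zeros_like(W)
keys = [np.sign(rng.randn(N, 1)) for _ in range(4)]    # the stored key cycle

for t in range(20):                                    # num_ticks
    key = keys[t % len(keys)]
    out = np.tanh(W.dot(key))
    value = np.sign(out)                               # self-generated rehearsal target
    e = out - value
    G = (e * (1.0 - out**2)).dot(key.T)                # dE/dW for E = 0.5 * ||e||**2
    dW += -eta * G                                     # batch: accumulate over the cycle
    if t % len(keys) == len(keys) - 1:                 # last key visited: apply and reset
        W += dW
        dW *= 0
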
Example #3
 def train_sequence(self, eta=0.01, num_epochs=1000, term=0.1, verbose=0):
     sequence = mu.random_patterns(self.layer_size(), self.memory_size)
     keys = sequence[:, :-1]
     next_keys = sequence[:, 1:]
     error_history = np.empty((num_epochs, ))
     # history_fun = lambda e: np.fabs(e).max()
     history_fun = lambda e: (e**2).sum()
     # train k -> v with gradient descent, incorporating passive loop
     for epoch in range(num_epochs):
         x = mu.forward_pass(keys, self.W_sq, self.f_sq)
         e = x[self.L_sq] - next_keys
         E = 0.5 * (e**2).sum()
         # early termination:
         if (np.fabs(e) < term).all(): break
         # Progress update:
         if epoch % int(num_epochs / 10) == 0:
             if verbose > 0: print('%d: %f' % (epoch, E))
             if verbose > 1:
                 print(x[self.L_sq].T)
                 print(next_keys.T)
         # Weight update:
         y = mu.backward_pass(x, e, self.W_sq, self.df_sq)
         G = mu.error_gradient(x, y)
         for k in self.W_sq:
             self.W_sq[k] += -eta * G[k]
         # learning curve
         error_history[epoch] = history_fun(e)
     # update history
     self.sequence_error_history = error_history[:epoch]
     # update key tracking
     self.first_key = sequence[:, [0]]
     self.last_key = sequence[:, [0]]  # starts at the first key; advanced lazily by next()
     self.current_key = sequence[:, [0]]
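
The key/target construction in train_sequence pairs each stored pattern with its successor by shifting columns. The tiny matrix below is made-up data illustrating the slicing (the real patterns come from mu.random_patterns):

import numpy as np

sequence = np.array([[ 1, -1,  1, -1],    # each column is one bipolar pattern
                     [ 1,  1, -1, -1],
                     [-1,  1,  1, -1]])
keys      = sequence[:, :-1]   # columns 0..2: network inputs
next_keys = sequence[:, 1:]    # columns 1..3: targets, so pattern i maps to pattern i + 1
first_key = sequence[:, [0]]   # [0] keeps a (3, 1) column vector, matching the key tracking above
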
Example #4
 def write_vanilla(self,
                   key,
                   value,
                   eta=0.01,
                   num_epochs=1000,
                   verbose=0,
                   term=0.5):
     cheat_keys = list(self.kv_cheat.keys())  # list() so the hashed keys can be indexed below
     cheat_error_history = np.empty((len(self.kv_cheat), num_epochs))
     write_error_history = np.empty((num_epochs, ))
     # history_fun = lambda e: np.fabs(e).max()
     history_fun = lambda e: (e**2).sum()
     # train k -> v with gradient descent, incorporating passive loop
     for epoch in range(num_epochs):
         x = mu.forward_pass(key, self.W, self.f)
         e = x[self.L] - value
         # early termination:
         if (np.fabs(e) < term).all(): break
         # Progress update:
         if epoch % int(num_epochs / 10) == 0:
             E = 0.5 * (e**2).sum()
             if verbose > 0: print('%d: %f' % (epoch, E))
             if verbose > 1:
                 print(x[self.L].T)
                 print(value.T)
         # Weight update:
         y = mu.backward_pass(x, e, self.W, self.df)
         G = mu.error_gradient(x, y)
         for k in self.W:
             self.W[k] += -eta * G[k]
         # Track write/any catastrophic forgetting
         write_error_history[epoch] = history_fun(e)
         for ck in range(len(cheat_keys)):
             x_ck = mu.forward_pass(mu.unhash_pattern(cheat_keys[ck]),
                                    self.W, self.f)
             e_ck = x_ck[self.L] - self.kv_cheat[cheat_keys[ck]]
             cheat_error_history[ck, epoch] = history_fun(e_ck)
     # update cheats
     self.write_error_history = write_error_history[:epoch]
     self.cheat_error_history = cheat_error_history[:, :epoch]
     self.kv_cheat[mu.hash_pattern(key)] = value
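
The kv_cheat bookkeeping in write_vanilla (and in write_passive above) monitors catastrophic forgetting: after each update, every previously written key is pushed through the network again and its error against the stored value is logged. A minimal sketch, with a stand-in recall function and made-up data in place of the class internals:

import numpy as np

def recall(W, key):
    # Stand-in for mu.forward_pass(key, ...)[L]: a one-layer tanh readout.
    return np.tanh(W.dot(key))

rng = np.random.RandomState(1)
N, num_epochs = 5, 3
W = rng.randn(N, N) / np.sqrt(N)
# Two previously written (key, value) pairs, standing in for self.kv_cheat:
stored = [(np.sign(rng.randn(N, 1)), np.sign(rng.randn(N, 1))) for _ in range(2)]
cheat_error_history = np.empty((len(stored), num_epochs))

for epoch in range(num_epochs):
    # ... the gradient step on the new (key, value) pair would go here ...
    for ck, (k_old, v_old) in enumerate(stored):
        e_ck = recall(W, k_old) - v_old
        cheat_error_history[ck, epoch] = (e_ck**2).sum()   # rising rows indicate forgetting
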
Example #5
 def next(self, key):
     next_key = np.sign(
         mu.forward_pass(key, self.W_sq, self.f_sq)[self.L_sq])
     if (self.last_key == key).all(): self.last_key = next_key
     return next_key
Example #6
 def read(self, key):
     return np.sign(mu.forward_pass(key, self.W, self.f)[self.L])
Example #7
 def read(self, key):
     return np.sign(mu.forward_pass(key, self.W_kv, self.f_kv)[self.L_kv])
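
Both read methods (and next in Example #5) recover a stored bipolar pattern by thresholding the final-layer activations with np.sign. A small illustration with a made-up activation vector standing in for mu.forward_pass(key, ...)[self.L]:

import numpy as np

x_L = np.array([[0.83], [-0.41], [0.07], [-0.95]])   # made-up final-layer activations
value = np.sign(x_L)                                 # -> [[ 1.], [-1.], [ 1.], [-1.]]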