Example #1
 import numpy as np  # these snippets also rely on the project-local `mu` utility module

 def write_passive(self,
                   key,
                   value,
                   eta=0.01,
                   num_epochs=1000,
                   verbose=0,
                   term=0.5,
                   batch=True):
     # stop early once every output component is within `term` of its target
     cheat_keys = list(self.kv_cheat.keys())  # list() so keys stay indexable in Python 3
     cheat_error_history = np.nan * np.ones(
         (len(self.kv_cheat), num_epochs))
     write_error_history = np.nan * np.ones((num_epochs, ))
     history_fun = lambda e: np.fabs(e).max()  # track the max absolute error
     # history_fun = lambda e: (e**2).sum()    # alternative: summed squared error
     # train k -> v with gradient descent, incorporating passive loop
     current_key = key
     dW = {k: np.zeros(self.W[k].shape) for k in self.W}
     for epoch in range(num_epochs):
         if (current_key == key).all():
             if batch:
                 # batch W update
                 for k in self.W:
                     self.W[k] += dW[k]
                     dW[k] *= 0
             x = mu.forward_pass(key, self.W, self.f)
             e = x[self.L] - value
             # early termination:
             if (np.fabs(e) < term).all(): break
             write_error_history[epoch] = history_fun(e)
             E = 0.5 * (e**2).sum()
         else:
             # passive rehearsal pass: pull a stored key's output toward its signs (+/-1)
             x = mu.forward_pass(current_key, self.W, self.f)
             e = x[self.L] - np.sign(x[self.L])
         # Progress update (E is the loss from the most recent pass on the write key):
         if epoch % max(num_epochs // 10, 1) == 0:
             if verbose > 0: print('%d: %f' % (epoch, E))
         # Weight update:
         y = mu.backward_pass(x, e, self.W, self.df)
         G = mu.error_gradient(x, y)
         for k in self.W:
             if batch:
                 dW[k] += -eta * G[k]  # batch
             else:
                 self.W[k] += -eta * G[k]  # stochastic
         # Track write error and any catastrophic forgetting of earlier pairs
         for ck in range(len(cheat_keys)):
             x_ck = mu.forward_pass(mu.unhash_pattern(cheat_keys[ck]),
                                    self.W, self.f)
             e_ck = x_ck[self.L] - self.kv_cheat[cheat_keys[ck]]
             cheat_error_history[ck, epoch] = history_fun(e_ck)
         # update current key
         if (current_key == self.last_key).all():
             current_key = self.first()
         else:
             current_key = self.next(current_key)
     # truncate histories to the epochs actually run and record the new pair
     self.write_error_history = write_error_history[:epoch]
     self.cheat_error_history = cheat_error_history[:, :epoch]
     self.kv_cheat[mu.hash_pattern(key)] = value
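
For orientation: what distinguishes write_passive from the plain write_vanilla below is the rehearsal schedule. current_key cycles round-robin through the stored key sequence (wrapping from self.last_key back to self.first()), and each epoch applies a gradient step either on the new pair (when the cycle is at key) or a sign-target rehearsal pass on a stored key. A tiny stand-alone sketch of just that scheduling, with a plain list standing in for the linked key sequence:

sequence = ['k0', 'k1', 'k2', 'k3']   # stand-in for the stored key sequence
new_key = 'k2'                        # the pair currently being written
cur = new_key                         # the loop starts at the new key
for epoch in range(8):
    kind = 'write step' if cur == new_key else 'rehearsal'
    print(epoch, kind, cur)
    # advance round-robin, mirroring the first()/next()/last_key logic above
    cur = sequence[(sequence.index(cur) + 1) % len(sequence)]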
 def write(self, k, v, num_epochs=None):
     if num_epochs is None: self.net.write(k, v)
     else: self.net.write(k, v, num_epochs=num_epochs)
     self.key_value_map[mu.hash_pattern(k)] = v
     if len(self.key_value_map) >= 2**self.net.layer_size():
         print('Warning: Address capacity reached!')
     self.passive_ticks()
 def memory_string(self, to_int=True):
     def _val(k):
         # fall back to a NaN pattern for keys with no stored value
         if mu.hash_pattern(k) in self.key_value_map:
             return self.key_value_map[mu.hash_pattern(k)]
         else: return np.nan * np.ones((self.net.layer_size(), 1))
     # key sequence
     k = self.first()
     keys = [k]
     values = [_val(k)]
     while mu.hash_pattern(k) in self.key_sequence:
         k = self.key_sequence[mu.hash_pattern(k)]
         keys.append(k)
         values.append(_val(k))
     if to_int:
         string = 'keys/values:\n'
         string += str(np.concatenate([
             mu.patterns_to_ints(keys),
             mu.patterns_to_ints(values)], axis=0))
     else:
         string = 'keys:\n'
         string += str(np.concatenate(keys, axis=1))
         string += '\nvalues:\n'
         string += str(np.concatenate(values, axis=1))
     return string
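
For context, the core write loop in both examples is plain gradient descent on a single key -> value pair, stopping early once every output component is within `term` of its target. Below is a minimal self-contained sketch of that idea. The two-layer tanh network, the write_pair helper, and the random +/-1 patterns are stand-ins of my own, since the internals of `mu` (forward_pass, backward_pass, error_gradient) are not shown in these snippets.

import numpy as np

def write_pair(W, key, value, eta=0.01, num_epochs=1000, term=0.5):
    # gradient-descent "write" of one key -> value pair, with early termination
    for epoch in range(num_epochs):
        h = np.tanh(W[0] @ key)           # hidden layer
        out = np.tanh(W[1] @ h)           # output layer
        e = out - value
        if (np.fabs(e) < term).all():     # same stopping rule as above
            break
        d_out = e * (1 - out**2)          # backprop through the tanh nonlinearity
        d_h = (W[1].T @ d_out) * (1 - h**2)
        W[1] += -eta * (d_out @ h.T)
        W[0] += -eta * (d_h @ key.T)
    return W

N = 8
rng = np.random.default_rng(0)
W = [0.1 * rng.standard_normal((N, N)) for _ in range(2)]
key = np.sign(rng.standard_normal((N, 1)))    # +/-1 column patterns, as in read() below
value = np.sign(rng.standard_normal((N, 1)))
write_pair(W, key, value)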
Example #4
 def write_vanilla(self,
                   key,
                   value,
                   eta=0.01,
                   num_epochs=1000,
                   verbose=0,
                   term=0.5):
     cheat_keys = list(self.kv_cheat.keys())  # list() so keys stay indexable in Python 3
     cheat_error_history = np.empty((len(self.kv_cheat), num_epochs))
     write_error_history = np.empty((num_epochs, ))
     # history_fun = lambda e: np.fabs(e).max()
     history_fun = lambda e: (e**2).sum()
     # train k -> v with gradient descent, incorporating passive loop
     for epoch in range(num_epochs):
         x = mu.forward_pass(key, self.W, self.f)
         e = x[self.L] - value
         # early termination:
         if (np.fabs(e) < term).all(): break
         # Progress update:
         if epoch % max(num_epochs // 10, 1) == 0:
             E = 0.5 * (e**2).sum()
             if verbose > 0: print('%d: %f' % (epoch, E))
             if verbose > 1:
                 print(x[self.L].T)
                 print(value.T)
         # Weight update:
         y = mu.backward_pass(x, e, self.W, self.df)
         G = mu.error_gradient(x, y)
         for k in self.W:
             self.W[k] += -eta * G[k]
         # Track write error and any catastrophic forgetting of earlier pairs
         write_error_history[epoch] = history_fun(e)
         for ck in range(len(cheat_keys)):
             x_ck = mu.forward_pass(mu.unhash_pattern(cheat_keys[ck]),
                                    self.W, self.f)
             e_ck = x_ck[self.L] - self.kv_cheat[cheat_keys[ck]]
             cheat_error_history[ck, epoch] = history_fun(e_ck)
     # truncate histories to the epochs actually run and record the new pair
     self.write_error_history = write_error_history[:epoch]
     self.cheat_error_history = cheat_error_history[:, :epoch]
     self.kv_cheat[mu.hash_pattern(key)] = value
 def write(self, k, v):
     # store a noisy copy of the pair; keys are hashed so they can be dict keys
     self.key_value_map[mu.hash_pattern(self._noise(k))] = self._noise(v)
 def read(self, k):
     if mu.hash_pattern(k) in self.key_value_map:
         return self.key_value_map[mu.hash_pattern(k)]
     else:
         # unknown key: fall back to a random +/-1 pattern
         return np.sign(np.random.randn(self.N, 1))
 def _val(self, k):
     if mu.hash_pattern(k) in self.key_value_map:
         return self.key_value_map[mu.hash_pattern(k)]
     else: return np.nan * np.ones((self.net.layer_size(), 1))
 def next(self, k):
     # advance to the successor key and record the transition in the key sequence
     k_next = self.net.next(k)
     self.key_sequence[mu.hash_pattern(k)] = k_next
     self.passive_ticks()
     return k_next
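
A note on the recurring mu.hash_pattern / mu.unhash_pattern calls: numpy arrays are mutable and not hashable, so a pattern has to be converted to an immutable value before it can serve as a dict key. The actual encoding in `mu` is not shown here; a plausible minimal sketch (assuming float64 column vectors, and taking the length as an extra argument, which the real unhash_pattern does not) might be:

import numpy as np

def hash_pattern(p):
    # bytes are immutable and hashable, so they work as dict keys
    return p.tobytes()

def unhash_pattern(b, N):
    # inverse of tobytes(); assumes float64 column vectors of length N
    return np.frombuffer(b, dtype=np.float64).reshape((N, 1))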