Example #1
File: copia.py Project: zenna/dddt
def neural_queue(element_type, queue_type):
  "Construct the neural networks implementing the queue interface"
  enqueue_img = EnqueueNet(queue_type, element_type, arch=MLPNet)
  dequeue_img = DequeueNet(queue_type, element_type, arch=MLPNet)
  empty_queue = ConstantNet(queue_type)
  neural_ref = ModuleDict({"enqueue": enqueue_img,
                           "dequeue": dequeue_img,
                           "empty": empty_queue})
  cuda(neural_ref)
  return neural_ref
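The ModuleDict is what makes the three interface networks visible as a single module, so one cuda(...) or .parameters() call covers all of them. A minimal sketch of that registration pattern with stock PyTorch (plain Linear layers stand in for the project-specific EnqueueNet, DequeueNet and ConstantNet):

from torch import nn, optim

# Stand-ins for the project-specific interface networks
ref = nn.ModuleDict({
  "enqueue": nn.Linear(4, 4),
  "dequeue": nn.Linear(4, 4),
  "empty": nn.Linear(4, 4),
})
# All submodule parameters are reachable through the dict
optimizer = optim.Adam(ref.parameters(), lr=1e-4)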
Example #2
File: nqueue.py Project: zenna/asl
def neural_queue(element_type, queue_type):
    enqueue_img = EnqueueNet(queue_type, element_type)
    dequeue_img = DequeueNet(queue_type, element_type)
    empty_queue = ConstantNet(queue_type)
    neural_ref = ModuleDict({
        "enqueue": enqueue_img,
        "dequeue": dequeue_img,
        "empty": empty_queue
    })
    cuda(neural_ref)
    return neural_ref
Example #3
def test_reverse_sketch():
  "Test Reverse Sketch"
  batch_size = 128
  tl = mnistloader(batch_size)
  items_iter = iter(tl)

  matrix_stack = Type("Stack", (1, 28, 28), dtype="float32")
  mnist_type = Type("mnist_type", (1, 28, 28), dtype="float32")
  nstack = neural_stack(mnist_type, matrix_stack)
  refstack = ref_stack()
  rev_sketch = ReverseSketch(Type, nstack, refstack)

  rev_items_iter = iter(tl)
  cuda(rev_sketch)
  optimizer = optim.Adam(rev_sketch.parameters(), lr=0.0001)

  def plot_items(i, log, writer, **kwargs):
    writer.add_image('fwd/1', log['forward'][0][0][0], i)
    writer.add_image('fwd/2', log['forward'][0][1][0], i)
    writer.add_image('fwd/3', log['forward'][0][2][0], i)
    writer.add_image('rev/1', log['reverse'][0][0][0], i)
    writer.add_image('rev/2', log['reverse'][0][1][0], i)
    writer.add_image('rev/3', log['reverse'][0][2][0], i)
    writer.add_image('out/1', log['out'][0][0][0], i)
    writer.add_image('out/2', log['out'][0][1][0], i)
    writer.add_image('out/3', log['out'][0][2][0], i)

  def loss_gen():
    nonlocal items_iter, rev_items_iter
    # Refresh the iterators if they run out
    try:
      items = iterget(items_iter, 3, transform=train_data)
      rev_items = iterget(rev_items_iter, 3, transform=train_data)
    except StopIteration:
      print("End of Epoch")
      items_iter = iter(tl)
      rev_items_iter = iter(tl)
      items = iterget(items_iter, 3, transform=train_data)
      rev_items = iterget(rev_items_iter, 3, transform=train_data)

    out_items = rev_sketch(items)
    rev_items.reverse()
    log_append("forward", items)
    log_append("reverse", rev_items)
    log_append("out", out_items)

    losses = [nn.MSELoss()(out_items[i], rev_items[i]) for i in range(3)]
    loss = sum(losses)
    return loss

  train(loss_gen, optimizer, [every_n(plot_items, 100)])
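The loss above follows a common pattern: one criterion per sequence position, summed into a single scalar so one backward pass trains on all positions at once. A standalone sketch of that pattern (shapes are arbitrary stand-ins):

import torch
from torch import nn

outs = [torch.rand(2, 3, requires_grad=True) for _ in range(3)]
targets = [torch.rand(2, 3) for _ in range(3)]
# One MSE term per position, summed into a single scalar loss
loss = sum(nn.MSELoss()(o, t) for o, t in zip(outs, targets))
loss.backward()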
Example #4
File: reverse.py Project: zenna/asl
def benchmark_copy_sketch(batch_size, stack_len, seq_len, arch, log_dir, lr,
                          arch_opt, **kwargs):
    # seq_len from the paper: between 1 and 20
    BernSeq = bern_seq(seq_len)
    MatrixStack = matrix_stack(1, seq_len, seq_len)
    arch_opt['combine_inputs'] = lambda xs: stretch_cat(
        xs, MatrixStack.size, 2)
    arch_opt['activation'] = F.sigmoid
    nstack = ModuleDict({
        'push':
        PushNet(MatrixStack, BernSeq, arch=ConvNet, arch_opt=arch_opt),
        'pop':
        PopNet(MatrixStack, BernSeq, arch=ConvNet, arch_opt=arch_opt),
        'empty':
        ConstantNet(MatrixStack)
    })

    refstack = ref_stack()
    copy_sketch = CopySketch(BernSeq, nstack, refstack, seq_len)
    cuda(copy_sketch)
    bern_iter = BernSeq.iter(batch_size)

    def loss_gen():
        # Should copy the sequence, so the output should match the target (here the reversed input)
        items = take(bern_iter, seq_len)
        rev_items = items.copy()
        rev_items.reverse()
        outputs = copy_sketch(items)
        log("outputs", outputs)
        log("items", items)
        log("rev_items", rev_items)

        return vec_dist(outputs, rev_items, dist=nn.BCELoss())

    optimizer = optim.Adam(copy_sketch.parameters(), lr)
    train(
        loss_gen,
        optimizer,
        cont=converged(1000),
        callbacks=[
            print_loss(100),
            every_n(plot_sketch, 500),
            #  common.plot_empty,
            #  common.plot_observes,
            save_checkpoint(1000, copy_sketch)
        ],
        log_dir=log_dir)
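vec_dist is project code; from its use here it presumably reduces a pairwise distance over two equal-length sequences of tensors. A plausible sketch under that assumption (the asl implementation may differ):

from torch import nn

def vec_dist_sketch(xs, ys, dist=nn.BCELoss()):
    # Mean of the pairwise distances across the two sequences
    return sum(dist(x, y) for x, y in zip(xs, ys)) / len(xs)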
Example #5
File: reverse.py Project: zenna/asl
def onehot(i, onehot_len, batch_size):
    # Index tensor must be 2D for scatter_ (reshape with view(-1, 1) if needed)
    y = torch.LongTensor(batch_size, 1).fill_(i)
    # One-hot buffer; can be created once outside a loop and reused
    y_onehot = torch.FloatTensor(batch_size, onehot_len)
    # Zero the buffer on each use, then scatter 1s into column i
    y_onehot.zero_()
    return Variable(cuda(y_onehot.scatter_(1, y, 1)), requires_grad=False)
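For reference, the same construction in a self-contained form without the deprecated Variable wrapper (torch.full replaces the ones().fill_() idiom); apart from the Variable/cuda wrapping, the values match the function above:

import torch

def onehot_sketch(i, onehot_len, batch_size):
    # Index column of shape (batch_size, 1) holding the class index i
    y = torch.full((batch_size, 1), i, dtype=torch.long)
    # Zeroed buffer, then scatter 1.0 into column i of every row
    return torch.zeros(batch_size, onehot_len).scatter_(1, y, 1.0)

onehot_sketch(2, 5, 3)  # 3x5 tensor, each row [0., 0., 1., 0., 0.]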
Example #6
File: copia.py Project: zenna/asl
def benchmark_copy_sketch():
    opt = handle_args()
    string_len = 8

    class SeqStack(Type):
        size = (string_len, 1)

    class BernSeq(Type):
        size = (string_len, 1)

    def bern_eq(*shape):
        return cuda(torch.bernoulli(torch.ones(*shape).fill_(0.5)))

    seq_sampler = infinite_samples(bern_eq, opt.batch_size, (string_len, 1),
                                   True)
    nqueue = neural_queue(SeqStack, BernSeq)
    refqueue = ref_queue()
    seq_len = 3  # From paper: between 1 and 20
    copy_sketch = CopySketch(Type, nqueue, refqueue, seq_len)
    cuda(copy_sketch)

    def loss_gen():
        items = iterget(seq_sampler, seq_len)
        target_items = copy(items)
        outputs = copy_sketch(items)
        log_append("outputs", outputs)
        log_append("items", items)
        losses = [
            nn.BCELoss()(outputs[i], target_items[i]) for i in range(seq_len)
        ]
        loss = sum(losses)
        print("LOSS", loss)
        return loss

    every = 100
    print_loss_gen = print_loss(every)
    next(print_loss_gen)

    optimizer = optim.Adam(copy_sketch.parameters(), opt.lr)
    print(opt)
    train(loss_gen,
          optimizer,
          cont=partial(max_iters, maxiters=1000000),
          callbacks=[every_n(plot_items, 100), print_loss_gen])
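The print_loss(every) / next(print_loss_gen) pair suggests a generator-based callback that must be primed before first use. A sketch of that pattern (the asl print_loss itself may differ):

def print_loss_sketch(every):
    i = 0
    while True:
        loss = yield  # receives the current loss via .send(loss)
        i += 1
        if i % every == 0:
            print("iter", i, "loss", loss)

cb = print_loss_sketch(100)
next(cb)  # prime the generator so it is paused at the first yield
cb.send(0.42)  # what the training loop would do each iteration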
Example #7
File: copia.py Project: zenna/dddt
def benchmark_copy_sketch():
  opt = handle_args()
  string_len = 8

  class SeqStack(Type):
    size = (string_len, 1)

  class BernSeq(Type):
    size = (string_len, 1)

  def bern_eq(*shape):
    return cuda(torch.bernoulli(torch.ones(*shape).fill_(0.5)))

  seq_sampler = infinite_samples(bern_eq, opt.batch_size, (string_len, 1), True)
  nqueue = neural_queue(SeqStack, BernSeq)
  refqueue = ref_queue()
  seq_len = 3  # From paper: between 1 and 20
  copy_sketch = CopySketch(Type, nqueue, refqueue, seq_len)
  cuda(copy_sketch)

  def loss_gen():
    items = iterget(seq_sampler, seq_len)
    target_items = copy(items)
    outputs = copy_sketch(items)
    log_append("outputs", outputs)
    log_append("items", items)
    losses = [nn.BCELoss()(outputs[i], target_items[i]) for i in range(seq_len)]
    loss = sum(losses)
    print("LOSS", loss)
    return loss

  every = 100
  print_loss_gen = print_loss(every)
  next(print_loss_gen)

  optimizer = optim.Adam(copy_sketch.parameters(), opt.lr)
  print(opt)
  train(loss_gen, optimizer, cont=partial(max_iters, maxiters=1000000),
        callbacks=[every_n(plot_items, 100), print_loss_gen])
Example #8
def onehot1d(enum, length=None):
  "Encode an Enum as a one hot vector"
  EnumOneHot1D = compound_encoding(enum.__class__.__bases__[0], OneHot1D)
  length = EnumOneHot1D.typesize[0] if length is None else length
  return EnumOneHot1D(Variable(cuda(onehot(enum.value, length, 1))))
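The helper builds a one-hot encoding from an Enum member's value, with the vector length derived from the enum's encoding type. A standalone sketch of the same idea with stock Python and PyTorch (compound_encoding and OneHot1D are project-specific, so here the length simply falls back to the enum's size):

import torch
from enum import Enum

class Color(Enum):
  RED = 0
  GREEN = 1
  BLUE = 2

def enum_onehot(member, length=None):
  length = len(type(member)) if length is None else length
  v = torch.zeros(1, length)
  v[0, member.value] = 1.0
  return v

enum_onehot(Color.GREEN)  # tensor([[0., 1., 0.]])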
Example #9
File: types.py Project: zenna/dddt
def sample(*shape):
  return Variable(cuda(torch.bernoulli(torch.ones(*shape).fill_(0.5))))
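torch.ones(*shape).fill_(0.5) builds a tensor of 0.5s that torch.bernoulli samples elementwise, so each entry is 1 with probability one half. A modern standalone equivalent (torch.full replaces the ones/fill_ idiom, and Variable is unnecessary in current PyTorch):

import torch

def sample_sketch(*shape):
  # Each element is independently 1 with probability 0.5
  return torch.bernoulli(torch.full(shape, 0.5))

sample_sketch(2, 3)  # e.g. tensor([[1., 0., 1.], [0., 0., 1.]])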
Example #10
File: copia.py Project: zenna/dddt
def choice1f(self, outlen):
  proj = Variable(cuda(torch.rand(self.choice_len, outlen)))
  return torch.matmul(self.choice1, proj)
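choice1f maps the stored choice vector into a width-outlen space through a freshly drawn random projection. A shape-level sketch of that matmul (choice_len and the (1, choice_len) choice vector are hypothetical stand-ins for the class attributes):

import torch

choice_len, outlen = 4, 8
choice1 = torch.rand(1, choice_len)    # stand-in for self.choice1
proj = torch.rand(choice_len, outlen)  # fresh random projection per call
out = torch.matmul(choice1, proj)      # shape (1, outlen)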
Example #11
File: copia.py Project: zenna/dddt
def bern_eq(*shape):
  return cuda(torch.bernoulli(torch.ones(*shape).fill_(0.5)))
Example #12
File: copia.py Project: zenna/asl
def choice1f(self, outlen):
    proj = Variable(cuda(torch.rand(self.choice_len, outlen)))
    return torch.matmul(self.choice1, proj)
Example #13
File: copia.py Project: zenna/asl
def bern_eq(*shape):
    return cuda(torch.bernoulli(torch.ones(*shape).fill_(0.5)))