Exemple #1
0
              num_hidden_layers=args.nhlayer,
              dropout=args.dropout,
              nr_cells=mem_slot,
              cell_size=mem_size,
              read_heads=read_heads,
              gpu_id=args.cuda,
              debug=args.debug,
              batch_first=True,
              independent_linears=True)

    # Log the model architecture before training starts.
    print(rnn)
    # Convention in this script: args.cuda == -1 means "stay on CPU",
    # otherwise it is the CUDA device index to move the model to.
    if args.cuda != -1:
        rnn = rnn.cuda(args.cuda)

    # Rolling buffer of recent losses; consumed by save/logging code elsewhere.
    last_save_losses = []
    optimizer = optim.Adam(rnn.parameters(),
                           lr=args.lr,
                           eps=1e-9,
                           betas=[0.9, 0.98])

    # Resume from the "best" checkpoint if one exists, restoring both the
    # model and optimizer state; otherwise start from epoch 1.
    check_ptr = os.path.join(ckpts_dir, 'best.pth')
    if os.path.isfile(check_ptr):
        curr_state = T.load(check_ptr)
        epoch = curr_state["epoch"] + 1  # continue *after* the saved epoch
        rnn.load_state_dict(curr_state["rnn_state"])
        optimizer.load_state_dict(curr_state["opti_state"])
        print("Model loaded.")
    else:
        epoch = 1

    # Recurrent state threaded through training iterations.
    # NOTE(review): names suggest (controller hidden, memory hidden,
    # read vectors) as used by DNC-style models -- confirm against the
    # model's forward() signature.
    (chx, mhx, rv) = (None, None, None)
                  gpu_id=args.cuda,
                  debug=args.visdom,
                  batch_first=True,
                  independent_linears=False)
    else:
        raise Exception('Not recognized type of memory')

    # args.cuda == -1 means CPU; otherwise move the model to that device.
    if args.cuda != -1:
        rnn = rnn.cuda(args.cuda)

    print(rnn)

    # Recent-loss buffer used by the save/logging logic elsewhere in the file.
    last_save_losses = []

    # Select the optimizer from the command-line flag.  The trailing
    # comments are the original authors' suggested learning rates.
    # NOTE(review): an unrecognized args.optim value leaves `optimizer`
    # unbound and will raise NameError later -- confirm the argument
    # parser restricts the choices.
    if args.optim == 'adam':
        optimizer = optim.Adam(rnn.parameters(),
                               lr=args.lr,
                               eps=1e-9,
                               betas=[0.9, 0.98])  # 0.0001
    elif args.optim == 'adamax':
        optimizer = optim.Adamax(rnn.parameters(),
                                 lr=args.lr,
                                 eps=1e-9,
                                 betas=[0.9, 0.98])  # 0.0001
    elif args.optim == 'rmsprop':
        optimizer = optim.RMSprop(rnn.parameters(),
                                  lr=args.lr,
                                  momentum=0.9,
                                  eps=1e-10)  # 0.0001
    elif args.optim == 'sgd':
        optimizer = optim.SGD(rnn.parameters(), lr=args.lr)  # 0.01
Exemple #3
0
          dropout=args.dropout,
          nr_cells=args.mem_slot,
          cell_size=args.mem_size,
          read_heads=args.read_heads,
          gpu_id=args.cuda,
          debug=args.visdom,
          batch_first=True,
          independent_linears=True)

# Move the model to the requested CUDA device (-1 = stay on CPU).
if args.cuda != -1:
    rnn = rnn.cuda(args.cuda)

    # NOTE(review): this print sits inside the `if` above, so the model is
    # only printed when running on GPU -- this looks like an indentation
    # slip; confirm intent before relying on it.
    print(rnn)

# Build the optimizer chosen on the command line.  Trailing comments are
# the authors' suggested learning rates for each optimizer.
# NOTE(review): an unrecognized args.optim leaves `optimizer` undefined.
if args.optim == 'adam':
    optimizer = optim.Adam(rnn.parameters(),
                           lr=args.lr,
                           eps=1e-9,
                           betas=[0.9, 0.98])  # 0.0001
elif args.optim == 'adamax':
    optimizer = optim.Adamax(rnn.parameters(),
                             lr=args.lr,
                             eps=1e-9,
                             betas=[0.9, 0.98])  # 0.0001
elif args.optim == 'rmsprop':
    optimizer = optim.RMSprop(rnn.parameters(),
                              lr=args.lr,
                              momentum=0.9,
                              eps=1e-10)  # 0.0001
elif args.optim == 'sgd':
    optimizer = optim.SGD(rnn.parameters(), lr=args.lr)  # 0.01
        dropout=args.dropout,
        nr_cells=mem_slot,
        cell_size=mem_size,
        read_heads=read_heads,
        gpu_id=args.cuda,
        debug=args.debug,
        batch_first=True,
        independent_linears=True
    )

    # Log the architecture, then move to GPU if a device index was given
    # (-1 means CPU by this script's convention).
    print(rnn)
    if args.cuda != -1:
        rnn = rnn.cuda(args.cuda)

    # Recent-loss buffer consumed by save/logging code elsewhere.
    last_save_losses = []
    optimizer = optim.Adam(rnn.parameters(), lr=args.lr, eps=1e-9, betas=[0.9, 0.98])

    # Resume from the "best" checkpoint when present: restore model and
    # optimizer state and continue one epoch after the saved one.
    check_ptr = os.path.join(ckpts_dir, 'best.pth')
    if os.path.isfile(check_ptr):
        curr_state = T.load(check_ptr)
        epoch = curr_state["epoch"] + 1
        rnn.load_state_dict(curr_state["rnn_state"])
        optimizer.load_state_dict(curr_state["opti_state"])
        print("Model loaded.")
    else:
        epoch = 1

    # Recurrent state threaded through iterations (controller hidden,
    # memory hidden, read vectors by naming convention -- unverified here).
    (chx, mhx, rv) = (None, None, None)

    # Main training loop; `epoch` doubles as the iteration counter and the
    # loop shadows the resume value intentionally.  NOTE(review): the loop
    # body continues beyond this fragment.
    for epoch in range(epoch,args.iterations + 1):
        llprint("\rIteration {ep}/{tot}".format(ep=epoch, tot=args.iterations))
Exemple #5
0
        independent_linears=independent_linears
    )
    else:
        raise Exception('Not recognized type of memory')

    # Optionally warm-start from a saved weights file given on the CLI.
    if args.model != "":
        rnn.load_state_dict(torch.load(args.model))

    # Print the structure of the rnn
    print(rnn)

    # -1 means CPU; otherwise args.cuda is the CUDA device index.
    if args.cuda != -1:
        rnn = rnn.cuda(args.cuda)

    # Optimizer selection from the command line; trailing comments are the
    # authors' suggested learning rates.
    # NOTE(review): an unrecognized args.optim value leaves `optimizer`
    # unbound -- confirm the argument parser constrains the choices.
    if args.optim == 'adam':
        optimizer = optim.Adam(rnn.parameters(), lr=args.lr, eps=1e-9, betas=[0.9, 0.98]) # 0.0001
    elif args.optim == 'adamax':
        optimizer = optim.Adamax(rnn.parameters(), lr=args.lr, eps=1e-9, betas=[0.9, 0.98]) # 0.0001
    elif args.optim == 'rmsprop':
        optimizer = optim.RMSprop(rnn.parameters(), lr=args.lr, momentum=0.9, eps=1e-10) # 0.0001
    elif args.optim == 'sgd':
        optimizer = optim.SGD(rnn.parameters(), lr=args.lr) # 0.01
    elif args.optim == 'adagrad':
        optimizer = optim.Adagrad(rnn.parameters(), lr=args.lr)
    elif args.optim == 'adadelta':
        optimizer = optim.Adadelta(rnn.parameters(), lr=args.lr)


    # List for keeping useful data
    last_costs = []
    last_costs_memory = []