Example #1
import os
import gym
import liveplot  # plotting helper shipped alongside this example (assumed importable)

def clear_monitor_files(training_dir):
    # Remove any gym monitor output left over from a previous run.
    # detect_monitor_files() is a helper defined elsewhere in the script.
    files = detect_monitor_files(training_dir)
    if len(files) == 0:
        return
    for file in files:
        print(file)
        os.unlink(file)
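
# Hypothetical stand-in for detect_monitor_files (the original script defines
# its own version elsewhere): gym's Monitor wrapper writes its output files
# with an "openaigym" prefix, so a simple glob over training_dir finds them.
import glob

def detect_monitor_files(training_dir):
    return glob.glob(os.path.join(training_dir, 'openaigym*'))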

if __name__ == '__main__':

    #REMEMBER!: turtlebot_nn_setup.bash must be executed.
    env = gym.make('Gazebo_ENPH_Ai_Adeept_Awr_Empty_NN-v0')
    outdir = '/tmp/gazebo_gym_experiments/'
    path = '/tmp/turtle_c2_dqn_ep'
    plotter = liveplot.LivePlot(outdir)

    continue_execution = False
    #fill this if continue_execution=True
    resume_epoch = '200' # change to epoch to continue from
    resume_path = path + resume_epoch
    weights_path = resume_path + '.h5'
    monitor_path = resume_path
    params_json  = resume_path + '.json'

    if not continue_execution:
        #Each time we take a sample and update our weights it is called a mini-batch.
        #Each time we run through the entire dataset, it's called an epoch.
        #PARAMETER LIST
        epochs = 10000
        steps = 100
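
The resume paths set up above (weights_path, params_json) imply a branch that restores a previous run when continue_execution is True. The snippet is truncated before that branch; the following is a minimal, hypothetical sketch of such a resume step, assuming the .h5 weights were saved with Keras and the hyperparameters as JSON. None of these names come from the original example.

import json
from keras.models import load_model  # assumption: .h5 weights saved via Keras

def resume_from_checkpoint(params_json, weights_path):
    # Reload hyperparameters from the saved .json and the trained network
    # from the matching .h5 file written by a previous run.
    with open(params_json) as f:
        params = json.load(f)
    model = load_model(weights_path)
    return params, model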
Example #2
import torch
import tqdm
from torch.autograd import Variable

# `opt`, `generator`, `discriminator`, `dataloader`, `cuda` and the `lp`
# plotting module are defined earlier in the original script.

optimizer_G = torch.optim.Adam(generator.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2))

# Use CUDA tensors when a GPU is available, CPU tensors otherwise.
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

D_hist = []
G_hist = []

bound_min = (1, -1)
bound_max = (-1, -1)

plot = lp.LivePlot((800, 600))


def lerp(v0, v1, t):
    # Linear interpolation between v0 and v1 by factor t in [0, 1].
    return v0 + t * (v1 - v0)


etqdm = tqdm.trange(opt.n_epochs)
for epoch in etqdm:
    btqdm = tqdm.tqdm(enumerate(dataloader), total=len(dataloader))
    for i, (imgs, _) in btqdm:
        # Adversarial ground truths: 1 for real images, 0 for generated ones.
        valid = Variable(Tensor(imgs.size(0), 1).fill_(1.),
                         requires_grad=False)
        fake = Variable(Tensor(imgs.size(0), 1).fill_(0.), requires_grad=False)

        # Real input batch, converted to the configured tensor type.
        real_imgs = Variable(imgs.type(Tensor))
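
The valid and fake tensors built above are the per-batch adversarial targets. For context, a minimal sketch of how such targets typically drive a BCE-based generator update follows; adversarial_loss, the noise batch z, and the function name are assumptions, not taken from the snippet.

import torch

adversarial_loss = torch.nn.BCELoss()  # assumed standard GAN objective

def generator_update(generator, discriminator, optimizer_G, z, valid):
    # One generator step: produce a fake batch from noise z and push the
    # discriminator's score on it toward the "valid" (real) label.
    optimizer_G.zero_grad()
    gen_imgs = generator(z)
    g_loss = adversarial_loss(discriminator(gen_imgs), valid)
    g_loss.backward()
    optimizer_G.step()
    return gen_imgs, g_loss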