def main(params):
    """Run the main training/validation loop.

    Model-agnostic driver: it reads config from ``params``, builds the data
    provider, solver, and model, then iterates over training batches. The
    gradient/update wiring is still commented out (scaffold below).

    Args:
        params: configuration dict with at least the keys ``"batch_size"``,
            ``"max_epochs"``, and ``"solver"``; the whole dict is also
            forwarded to ``DataProvider`` and ``getModelObj``.
    """
    batch_size = params["batch_size"]
    max_epochs = params["max_epochs"]

    # Fetch the data provider object.
    dp = DataProvider(params)

    # Get the solver object.
    solver = Solver(params["solver"])

    # Model initialization code — getModelObj presumably dispatches on
    # params to pick the model class (defined elsewhere in this project).
    modelObj = getModelObj(params)

    # Now let's build a gradient computation graph and rmsprop update
    # mechanism (scaffold kept from the original, not yet wired up):
    # grads = tensor.grad(cost, wrt=model.values())
    # lr = tensor.scalar(name='lr', dtype=config.floatX)
    # f_grad_shared, f_update, zg, rg, ud = solver.build_solver_model(
    #     lr, model, grads, inp_list, cost, params)

    num_frames_total = dp.getSplitSize("train")
    # Floor division so max_iters is an int under both Python 2 and 3;
    # the original `/` truncated only because of Python-2 int division.
    num_iters_one_epoch = num_frames_total // batch_size
    max_iters = max_epochs * num_iters_one_epoch

    # NOTE(review): xrange is Python 2 only — confirm the target runtime
    # before porting; on Python 3 this must become range().
    for it in xrange(max_iters):
        batch = dp.getBatch(batch_size)