示例#1
0
 def _load_model(self, model_path=None, tic=None):
     """Restore saved network weights from *model_path* into ``self.net``.

     Both arguments are effectively required: the ``None`` defaults are
     rejected by the asserts below.  *tic* is a ``time.time()`` reference
     taken by the caller, used only to report elapsed seconds.
     """
     begin_banner = "loading model begin({})".format(time.time() - tic)
     print(begin_banner)
     assert model_path is not None
     assert tic is not None
     print("loading model from {}".format(model_path))
     resume_model(model_path, self.net)
     done_banner = "loading model done({})".format(time.time() - tic)
     print(done_banner)
示例#2
0
 def _load_model(self, model_path=None, tic=None):
     """Load persisted model weights from *model_path* into ``self.net``.

     Args:
         model_path: filesystem path of the saved model.  Required — the
             ``None`` default is rejected by the assert below.
         tic: ``time.time()`` timestamp taken by the caller; used to log
             elapsed seconds.  Also required.
     """
     print("loading model begin({})".format(time.time() - tic))
     # NOTE(review): asserts are stripped under `python -O`; if these
     # arguments must be validated in production, raise ValueError instead.
     assert model_path is not None
     assert tic is not None
     print("loading model from {}".format(model_path))
     resume_model(model_path, self.net)
     print("loading model done({})".format(time.time() - tic))
示例#3
0
    # Momentum-SGD branch (the matching `if` header is above this excerpt):
    # each parameter gets a shared velocity buffer `m`, zero-initialised
    # with the parameter's shape and broadcast pattern.
    for name, param in net.params.items():
        m = theano.shared(param.get_value()*0., broadcastable=param.broadcastable)
        # classic momentum rule: v = momentum * v_old - lr * grad(loss, param)
        v = momentum * m - learning_rate * T.grad(net.loss, param)
        updates.append((m, v))  # persist the new velocity
        updates.append((param, param + v))  # step the parameter along v
else:
    # Plain SGD branch: param <- param - lr * grad, no velocity state.
    print("using normal sgd and learning_rate:{}".format(learning_rate.get_value()))
    for name, param in net.params.items():
        print(name, type(param))
        grad = T.grad(net.loss, param)
        updates.append((param, param - learning_rate * grad))

# resume model
# Warm-start from an earlier snapshot when requested: start_epoch == 0
# means a fresh run; otherwise load ../snapshot/<start_epoch>.pkl.
if start_epoch > 0:
    resume_path = "../snapshot/{}.pkl".format(start_epoch)
    resume_model(resume_path, net)


# build train function
print("building training function({})".format(time.time() - begin))
# Compile the training step.  No explicit inputs: the minibatch is fed
# through the shared variables via `givens`.  Returns the loss and applies
# the SGD `updates` assembled above.
train = theano.function(inputs=[],
                        outputs=net.loss,
                        updates=updates,
                        givens={
                            x: x_shared,
                            x_mask: x_mask_shared,
                            y: y_shared,
                            y_clip: y_clip_shared
                        })
示例#4
0
                          broadcastable=param.broadcastable)
        # classic momentum rule: v = momentum * v_old - lr * grad(loss, param)
        v = momentum * m - learning_rate * T.grad(net.loss, param)
        updates.append((m, v))  # persist the new velocity
        updates.append((param, param + v))  # step the parameter along v
else:
    # Plain SGD branch: param <- param - lr * grad, no velocity state.
    print("using normal sgd and learning_rate:{}".format(
        learning_rate.get_value()))
    for name, param in net.params.items():
        print(name, type(param))
        grad = T.grad(net.loss, param)
        updates.append((param, param - learning_rate * grad))

# resume model
# Warm-start from an earlier snapshot when requested: start_epoch == 0
# means a fresh run; otherwise load ../snapshot/<start_epoch>.pkl.
if start_epoch > 0:
    resume_path = "../snapshot/{}.pkl".format(start_epoch)
    resume_model(resume_path, net)

# build train function
print("building training function({})".format(time.time() - begin))
# The minibatch is supplied through shared variables, so the compiled
# function takes no explicit inputs; it returns the loss and applies the
# parameter updates assembled above.
train_givens = {
    x: x_shared,
    x_mask: x_mask_shared,
    y: y_shared,
    y_clip: y_clip_shared
}
train = theano.function(inputs=[],
                        outputs=net.loss,
                        updates=updates,
                        givens=train_givens)

# build test function (its construction continues past this excerpt)
print("building testing function({})".format(time.time() - begin))