Example #1
    def __init__(self, keras_model, optimizer):  # optimizer: assumed to be passed in; compile() below needs it
        print("QNetKeras constructor")
        QNet.__init__(self)
        self.Model = keras_model

        # build trainable model

        y_pred = self.Model.output
        nactions = K.int_shape(self.Model.output)[-1]
        y_true = Input(name='y_true', shape=(nactions,))
        mask = Input(name='mask', shape=(nactions,))
        loss_out = Lambda(masked_error, output_shape=(1, ),
                          name='loss')([y_true, y_pred, mask])
        ins = (self.Model.input if isinstance(self.Model.input, list)
               else [self.Model.input])
        trainable_model = Model(inputs=ins + [y_true, mask],
                                outputs=[loss_out, y_pred])
        assert len(trainable_model.output_names) == 2
        #combined_metrics = {trainable_model.output_names[1]: metrics}
        losses = [
            lambda y_true, y_pred: y_pred,  # loss is computed in Lambda layer
            lambda y_true, y_pred: K.zeros_like(
                y_pred),  # we only include this for the metrics
        ]
        trainable_model.compile(optimizer=optimizer,
                                loss=losses)  #, metrics=combined_metrics)
        self.TrainableModel = trainable_model
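
The snippet assumes `from keras.layers import Input, Lambda`, `from keras.models import Model` and `from keras import backend as K`, and it calls a `masked_error` helper that is not shown. A minimal sketch, assuming the helper scores only the entries selected by the mask (typically the one action actually taken):

def masked_error(args):
    # args arrives from the Lambda layer as [y_true, y_pred, mask]
    y_true, y_pred, mask = args
    err = K.square(y_true - y_pred) * mask  # zero out actions that were not taken
    return K.sum(err, axis=-1)              # one scalar loss per sample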
Example #2
def model(img_size):
    inp = InputLayer(img_size)
    r = Reshape(inp, inp.shape + (1,))
    c1 = ReLU(
        Pool3D(
            Conv3D(r, 5, 5, 5, 20),
            (2, 2, 2)
        )
    )
    print(c1.shape)
    c2 = ReLU(
        Pool3D(
            Conv3D(c1, 3, 3, 3, 30),
            (2, 2, 2)
        )
    )
    print(c2.shape)
    f = Flatten(c2)
    print(f.shape)

    l = Tanh(Linear(f, 20))
    out = Linear(l, 2)
    return Model([inp], out, LogRegression(out), applier_class=AdaDeltaApplier)
Example #3
    def defaultNetwork(self, env):
        in_size = env.StateVectorSize
        out_size = env.NActions
        inp = InputLayer((in_size, ))
        w = 0.0
        hsize = (in_size + out_size) * 3
        h1 = Tanh(Linear(inp, hsize, name="H1", weight_decay=w))
        h2 = Tanh(Linear(h1, hsize, name="H2", weight_decay=w))
        out = Tanh(Linear(h2, out_size, name="Out", weight_decay=w))
        return Model(inp,
                     out,
                     L2Regression(out),
                     applier_class=AdaDeltaApplier)
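
This method reads only two attributes from env, so a hypothetical stub is enough to build the network (the same holds for the LSTM variant in Example #6); agent here stands for whatever object defines defaultNetwork:

class DummyEnv:
    StateVectorSize = 8  # hypothetical state size
    NActions = 4         # hypothetical action count

net = agent.defaultNetwork(DummyEnv())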
Example #4
def cnn2d(nout):
    inp = InputLayer((10,10,3))
    c = Conv(inp, 3, 3, 3)
    p = Pool(c, (2,2))
    
    f = Flatten(p)
    loss = L2Regression(f)
    nn = Model(inp, f, loss)
    # nparams = 0
    # for l in nn.layers:
    #     if isinstance(l, ParamMixin):
    #         for p in l.params():
    #             nparams += p.size
    # print(nparams)
    return nn
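
The commented-out block hints at how to count a model's parameters. A hedged sketch, assuming trainable layers mix in ParamMixin and expose a params() iterator of numpy arrays, as the comments suggest:

def count_params(nn):
    # Sum the sizes of every parameter array across the model's layers.
    total = 0
    for layer in nn.layers:
        if isinstance(layer, ParamMixin):
            for p in layer.params():
                total += p.size
    return total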
Example #5
import numpy as np

def simple(img_size):
    inp = InputLayer(img_size)
    r = Reshape(inp, inp.shape + (1,))
    c = Conv3D(r, 3, 3, 3, 2)
    print("c.shape=", c.shape)
    p = Pool3D(c, (2, 2, 2))
    print("p.shape=", p.shape)
    f = Flatten(p)
    print("f.shape=", f.shape)
    m = Model([inp], f, L2Regression(f))
    x = np.random.random((3,) + img_size)
    y = m(x)
    print("c.out=", c.Y.shape)
    print("p.out=", p.Y.shape)
    print("f.out=", f.Y.shape)
    print(y.shape)
Example #6
    def defaultNetwork(self, env):
        state_size = env.StateVectorSize
        in_size = state_size + 2  # + action and reward
        out_size = env.NActions
        inp = InputLayer((None, in_size))
        hidden_size = (in_size + out_size) * 2
        w = 0.0
        r1 = Tanh(
            LSTM(inp, hidden_size, hidden_size, name="R1", weight_decay=w))
        r2 = Tanh(
            LSTM(r1, hidden_size, hidden_size, name="R2", weight_decay=w))
        out = Sigmoid(
            LSTM(r2, out_size, hidden_size, name="Out", weight_decay=w))
        return Model(inp,
                     out,
                     L2Regression(out),
                     applier_class=AdaDeltaApplier)
Example #7
def cnn3d(nout):
    inp = InputLayer((20,20,20, 1))
    c1 = Conv3D(inp, 3, 3, 3, 5)
    p1 = Pool3D(c1, (2,2,2))
    c2 = Conv3D(p1, 3, 3, 3, 10)
    p2 = Pool3D(c2, (2,2,2))
    
    f = Flatten(p2)
    loss = L2Regression(f)
    nn = Model(inp, f, loss)
    # nparams = 0
    # for l in nn.layers:
    #     if isinstance(l, ParamMixin):
    #         for p in l.params():
    #             nparams += p.size
    # print(nparams)
    return nn
Example #8
def pool(nout):
    inp = InputLayer((9, 8, 5))
    p = Pool(inp, (3, 4))
    return Model(inp, p, L2Regression(p))
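
As in Example #5, Model instances appear to be callable on numpy arrays, so a quick shape check might look like the following; the batch size and the expected output shape are assumptions, based on non-overlapping (3, 4) pooling over the first two axes:

import numpy as np

nn = pool(0)                        # nout is unused by this example
x = np.random.random((4, 9, 8, 5))  # batch of 4 random inputs
y = nn(x)                           # expected y.shape: (4, 3, 2, 5)
print(y.shape)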