Example #1
def test_warnings():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])

    with pytest.warns(Warning) as w:
        out = model.fit_generator(gen_data(4), steps_per_epoch=10, use_multiprocessing=True, workers=2)
    warning_raised = any(['Sequence' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when using generator with processes.'

    with pytest.warns(None) as w:
        out = model.fit_generator(RandomSequence(3), steps_per_epoch=4, use_multiprocessing=True, workers=2)
    assert all(['Sequence' not in str(w_.message) for w_ in w]), 'A warning was raised for Sequence.'
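Example #1 references a RandomSequence helper that is not shown. A minimal sketch, assuming it is a keras.utils.Sequence subclass yielding random batches shaped for the two-input, two-output model above; the class name and batch-size argument come from the call RandomSequence(3), while the epoch length is a hypothetical choice:

import numpy as np
from keras.utils import Sequence

class RandomSequence(Sequence):
    def __init__(self, batch_size, epoch_length=12):
        self.batch_size = batch_size
        self.epoch_length = epoch_length  # hypothetical number of batches per epoch

    def __len__(self):
        return self.epoch_length

    def __getitem__(self, idx):
        # Random batches matching the model: two (batch, 3) inputs,
        # one (batch, 4) target and one (batch, 3) target.
        return ([np.random.random((self.batch_size, 3)),
                 np.random.random((self.batch_size, 3))],
                [np.random.random((self.batch_size, 4)),
                 np.random.random((self.batch_size, 3))])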
Example #2
def test_sparse_input_validation_split():
    test_input = sparse.random(6, 3, density=0.25).tocsr()
    in1 = Input(shape=(3,), sparse=True)
    out1 = Dense(4)(in1)
    test_output = np.random.random((6, 4))
    model = Model(in1, out1)
    model.compile('rmsprop', 'mse')
    model.fit(test_input, test_output, epochs=1, batch_size=2, validation_split=0.2)
Example #3
def test_resnet():
    n = 4
    x = Input(shape=(1, 8, 8))
    y = sequential([
        conv2d_block(n),
        resnet(n)
    ])(x)
    model = Model(x, y)
    assert model.get_output_shape_for((None, 1, 8, 8)) == (None, n, 8, 8)
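Several of these examples compose layers with a sequential helper rather than the Sequential model. A minimal sketch of what such a helper could look like, assuming it flattens a (possibly nested) list of layers and chains their calls; the real helper in the source project may also handle namespaces and trainability:

def sequential(layers):
    def flatten(items):
        # Allow nested lists of layers, as in sequential([conv2d_block(n), resnet(n)]).
        for item in items:
            if isinstance(item, (list, tuple)):
                yield from flatten(item)
            else:
                yield item

    def call(x):
        # Apply each layer in order, threading the tensor through.
        for layer in flatten([layers]):
            x = layer(x)
        return x
    return call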
Example #4
def m():
    x = Input(shape=(input_size + output_size, nb_chars))
    m_realness = sequential([
        LSTM(14),
        Dense(1, activation='sigmoid'),
    ])(x)
    m = Model([x], [m_realness])
    m.compile(Adam(), 'mse')
    return m
Example #5
def decoder_dummy(label_sizes, nb_filter=16, data_shape=(1, 64, 64), nb_bits=12,
                  optimizer='adam'):

    input = Input(shape=data_shape)
    x = input
    outputs, losses = decoder_end_block(x, label_sizes, nb_bits,
                                        activation=lambda: ELU())

    model = Model(input, list(outputs.values()))
    model.compile(optimizer, loss=list(losses.values()),
                  loss_weights={k: decoder_loss_weights(k) for k in losses.keys()})
    return model
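decoder_end_block is shared by several decoder builders here but never shown. A hedged sketch, assuming label_sizes is a dict mapping output names to sizes and that the block returns parallel dicts of output tensors and loss names; the trunk sizes and loss choices below are illustrative, not the project's actual head:

from collections import OrderedDict
from keras.layers import Dense, Flatten

def decoder_end_block(x, label_sizes, nb_bits, activation):
    x = Flatten()(x)
    x = Dense(256)(x)
    x = activation()(x)
    outputs, losses = OrderedDict(), OrderedDict()
    # One head for the bits, one head per labelled output.
    outputs['bits'] = Dense(nb_bits, activation='tanh', name='bits')(x)
    losses['bits'] = 'mse'
    for name, size in label_sizes.items():
        outputs[name] = Dense(size, name=name)(x)
        losses[name] = 'mse'
    return outputs, losses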
Example #6
def test_model_multiple_calls():
    x1 = Input(shape=(20,))

    y1 = sequential([
        Dense(10),
        Dense(1),
    ])(x1)
    m1 = Model(x1, y1)

    x2 = Input(shape=(25,))
    y2 = sequential([
        Dense(20),
        m1
    ])(x2)
    m2 = Model(x2, y2)
    m2.compile('adam', 'mse')

    x3 = Input(shape=(20,))
    y3 = sequential([
        Dense(25),
        m2
    ])(x3)
    m3 = Model(x3, y3)
    m3.compile('adam', 'mse')
    m3.train_on_batch(np.zeros((32, 20)), np.zeros((32, 1)))
Example #7
def decoder_baseline(label_sizes, nb_bits=12, data_shape=(1, 64, 64),
                     depth=1, nb_filter=16, optimizer='adam'):
    n = nb_filter
    input = Input(shape=data_shape)
    x = sequential([
        conv2d_block(n, depth=depth, pooling='max'),    # 32x32
        conv2d_block(2*n, depth=depth, pooling='max'),  # 16x16
        conv2d_block(4*n, depth=depth, pooling='max'),  # 8x8
        conv2d_block(8*n, depth=depth, pooling='max'),  # 4x4
    ])(input)
    outputs, losses = decoder_end_block(x, label_sizes, nb_bits,
                                        activation=lambda: ELU())
    model = Model(input, list(outputs.values()))
    model.compile(optimizer, loss=list(losses.values()),)
    return model
Example #8
def test_sparse_placeholder_fit():
    test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
    test_outputs = [sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)]
    in1 = Input(shape=(3,))
    in2 = Input(shape=(3,), sparse=True)
    out1 = Dropout(0.5, name='dropout')(in1)
    out2 = Dense(4, name='dense_1')(in2)
    model = Model([in1, in2], [out1, out2])
    model.predict(test_inputs, batch_size=2)
    model.compile('rmsprop', 'mse')
    model.fit(test_inputs, test_outputs, epochs=1, batch_size=2, validation_split=0.5)
    model.evaluate(test_inputs, test_outputs, batch_size=2)
Example #9
    def build(self):
        mc = self.config.model
        in_x = x = Input((14, 10, 9)) # 14 x 10 x 9

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_first_filter_size, padding="same",
                   data_format="channels_first", use_bias=False, kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-"+str(mc.cnn_first_filter_size)+"-"+str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for policy output
        x = Conv2D(filters=2, kernel_size=1, data_format="channels_first", use_bias=False, 
                    kernel_regularizer=l2(mc.l2_reg), name="policy_conv-1-2")(res_out)
        x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
        x = Activation("relu", name="policy_relu")(x)
        x = Flatten(name="policy_flatten")(x)
        policy_out = Dense(self.n_labels, kernel_regularizer=l2(mc.l2_reg), activation="softmax", name="policy_out")(x)

        # for value output
        x = Conv2D(filters=4, kernel_size=1, data_format="channels_first", use_bias=False, 
                    kernel_regularizer=l2(mc.l2_reg), name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu",name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size, kernel_regularizer=l2(mc.l2_reg), activation="relu", name="value_dense")(x)
        value_out = Dense(1, kernel_regularizer=l2(mc.l2_reg), activation="tanh", name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out], name="cchess_model")
        self.graph = tf.get_default_graph()
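The loop above calls self._build_residual_block, which is not shown. A sketch of an AlphaZero-style block consistent with the surrounding code (two conv/batchnorm stages plus a skip connection); the filter count, naming scheme, and the Add layer are assumptions, and the Keras layers are the same ones imported for the snippet above:

    def _build_residual_block(self, x, index):
        mc = self.config.model
        in_x = x
        res_name = "res" + str(index)
        x = Conv2D(filters=mc.cnn_filter_num, kernel_size=3, padding="same",
                   data_format="channels_first", use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg), name=res_name + "_conv1")(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm1")(x)
        x = Activation("relu", name=res_name + "_relu1")(x)
        x = Conv2D(filters=mc.cnn_filter_num, kernel_size=3, padding="same",
                   data_format="channels_first", use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg), name=res_name + "_conv2")(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm2")(x)
        # Skip connection followed by the final activation.
        x = Add(name=res_name + "_add")([in_x, x])
        x = Activation("relu", name=res_name + "_relu2")(x)
        return x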
Example #10
    def create_policy_value_net(self):
        """create the policy value network """   
        in_x = network = Input((4, self.board_width, self.board_height))

        # conv layers
        network = Conv2D(filters=32, kernel_size=(3, 3), padding="same", data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
        network = Conv2D(filters=64, kernel_size=(3, 3), padding="same", data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
        network = Conv2D(filters=128, kernel_size=(3, 3), padding="same", data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
        # action policy layers
        policy_net = Conv2D(filters=4, kernel_size=(1, 1), data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
        policy_net = Flatten()(policy_net)
        self.policy_net = Dense(self.board_width*self.board_height, activation="softmax", kernel_regularizer=l2(self.l2_const))(policy_net)
        # state value layers
        value_net = Conv2D(filters=2, kernel_size=(1, 1), data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
        value_net = Flatten()(value_net)
        value_net = Dense(64, kernel_regularizer=l2(self.l2_const))(value_net)
        self.value_net = Dense(1, activation="tanh", kernel_regularizer=l2(self.l2_const))(value_net)

        self.model = Model(in_x, [self.policy_net, self.value_net])
        
        def policy_value(state_input):
            state_input_union = np.array(state_input)
            results = self.model.predict_on_batch(state_input_union)
            return results
        self.policy_value = policy_value
Example #11
    def create_network(self):
        x_in = Input((3, 8, 8))
        x = Conv2D(filters=128, kernel_size=(3,3), padding="same", data_format="channels_first")(x_in)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        for _ in range(10):
            x = self._build_residual_block(x)

        res_out = x
        
        x = Conv2D(filters=2, kernel_size=1, data_format="channels_first")(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        policy_out = Dense(8*8+1, activation="softmax", name="policy_out")(x)

        x = Conv2D(filters=1, kernel_size=1, data_format="channels_first")(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        x = Dense(64, activation="relu")(x)
        value_out = Dense(1, activation="tanh", name="value_out")(x)
        
        self.network = Model(x_in, [policy_out, value_out], name="reversi_model")
        self.compile()
Example #12
def test_render_gan_builder_generator_extended():
    labels_shape = (27,)
    z_dim_offset = 50
    builder = RenderGAN(lambda x: tag3d_network_dense(x, nb_units=4),
                        generator_units=4, discriminator_units=4,
                        z_dim_offset=z_dim_offset,
                        labels_shape=(27,))
    bs = 19
    z, z_offset, labels = data(builder, bs)
    real = np.zeros((bs,) + builder.data_shape)

    labels_input = Input(shape=labels_shape)
    z = Input(shape=(z_dim_offset,))
    fake = builder.generator_given_z_and_labels([z, labels_input])
    m = Model([z, labels_input], [fake])
    m.compile('adam', 'mse')
    m.train_on_batch([z_offset, labels], real)
Example #13
 def build(self):
     dim_data = self.size_of_input_data_dim
     nb_time_step = self.size_of_input_timesteps
     news_input = Input(shape=(nb_time_step, dim_data))
     lstm = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                 W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh')
     bi_lstm = Bidirectional(lstm, input_shape=(nb_time_step, dim_data), merge_mode='concat')
     all_news_rep = bi_lstm(news_input)
     news_predictions = Dense(1, activation='linear')(all_news_rep)
     self.model = Model(news_input, news_predictions, name="deep rnn for financial news analysis")
Example #14
    def build(self):
        enc_size = self.size_of_env_observation()
        argument_size = IntegerArguments.size_of_arguments
        input_enc = InputLayer(batch_input_shape=(self.batch_size, enc_size), name='input_enc')
        input_arg = InputLayer(batch_input_shape=(self.batch_size, argument_size), name='input_arg')
        input_prg = Embedding(input_dim=PROGRAM_VEC_SIZE, output_dim=PROGRAM_KEY_VEC_SIZE, input_length=1,
                              batch_input_shape=(self.batch_size, 1))

        f_enc = Sequential(name='f_enc')
        f_enc.add(Merge([input_enc, input_arg], mode='concat'))
        f_enc.add(MaxoutDense(128, nb_feature=4))
        self.f_enc = f_enc

        program_embedding = Sequential(name='program_embedding')
        program_embedding.add(input_prg)

        f_enc_convert = Sequential(name='f_enc_convert')
        f_enc_convert.add(f_enc)
        f_enc_convert.add(RepeatVector(1))

        f_lstm = Sequential(name='f_lstm')
        f_lstm.add(Merge([f_enc_convert, program_embedding], mode='concat'))
        f_lstm.add(LSTM(256, return_sequences=False, stateful=True, W_regularizer=l2(0.0000001)))
        f_lstm.add(Activation('relu', name='relu_lstm_1'))
        f_lstm.add(RepeatVector(1))
        f_lstm.add(LSTM(256, return_sequences=False, stateful=True, W_regularizer=l2(0.0000001)))
        f_lstm.add(Activation('relu', name='relu_lstm_2'))
        # plot(f_lstm, to_file='f_lstm.png', show_shapes=True)

        f_end = Sequential(name='f_end')
        f_end.add(f_lstm)
        f_end.add(Dense(1, W_regularizer=l2(0.001)))
        f_end.add(Activation('sigmoid', name='sigmoid_end'))

        f_prog = Sequential(name='f_prog')
        f_prog.add(f_lstm)
        f_prog.add(Dense(PROGRAM_KEY_VEC_SIZE, activation="relu"))
        f_prog.add(Dense(PROGRAM_VEC_SIZE, W_regularizer=l2(0.0001)))
        f_prog.add(Activation('softmax', name='softmax_prog'))
        # plot(f_prog, to_file='f_prog.png', show_shapes=True)

        f_args = []
        for ai in range(1, IntegerArguments.max_arg_num+1):
            f_arg = Sequential(name='f_arg%s' % ai)
            f_arg.add(f_lstm)
            f_arg.add(Dense(IntegerArguments.depth, W_regularizer=l2(0.0001)))
            f_arg.add(Activation('softmax', name='softmax_arg%s' % ai))
            f_args.append(f_arg)
        # plot(f_arg, to_file='f_arg.png', show_shapes=True)

        self.model = Model([input_enc.input, input_arg.input, input_prg.input],
                           [f_end.output, f_prog.output] + [fa.output for fa in f_args],
                           name="npi")
        self.compile_model()
        plot(self.model, to_file='model.png', show_shapes=True)
Example #15
 def load(self, config_path, weight_path):
     if os.path.exists(config_path) and os.path.exists(weight_path):
         logger.debug(f"loading model from {config_path}")
         with open(config_path, "rt") as f:
             self.model = Model.from_config(json.load(f))
         self.model.load_weights(weight_path)
         self.digest = self.fetch_digest(weight_path)
         self.graph = tf.get_default_graph()
         logger.debug(f"loaded model digest = {self.digest}")
         return True
     else:
         logger.debug(f"model files does not exist at {config_path} and {weight_path}")
         return False
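fetch_digest is referenced in these load methods but never shown. A plausible sketch, assuming it hashes the weight file contents; the hash choice is an assumption and hashlib/os are assumed imported:

 def fetch_digest(self, weight_path):
     # Hypothetical sketch: digest of the weight file contents.
     if os.path.exists(weight_path):
         m = hashlib.sha256()
         with open(weight_path, "rb") as f:
             m.update(f.read())
         return m.hexdigest()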
Example #16
def decoder_resnet(label_sizes, nb_filter=16, data_shape=(1, 64, 64), nb_bits=12,
                   resnet_depth=(3, 4, 6, 3),
                   optimizer='adam'):
    def _bn_relu_conv(nb_filter, nb_row=3, nb_col=3, subsample=1):
        return sequential([
            BatchNormalization(mode=0, axis=1),
            ELU(),
            Convolution2D(nb_filter=nb_filter, nb_row=nb_row, nb_col=nb_col,
                          subsample=(subsample, subsample), init="he_normal", border_mode="same")
        ])

    def f(nb_filter, subsample=1):
        return sequential([
            _bn_relu_conv(nb_filter, subsample=subsample),
            _bn_relu_conv(nb_filter),
        ])

    input = Input(shape=data_shape)
    filters_by_depth = [nb_filter * 2**i for i in range(len(resnet_depth))]
    print("filters_by_depth", filters_by_depth)
    x = _bn_relu_conv(nb_filter, 3, 3, subsample=2)(input)
    for i, (n, d) in enumerate(zip(filters_by_depth, resnet_depth)):
        for di in range(d):
            if di == 0 and i != 0:
                shortcut = _bn_relu_conv(n, 1, 1, subsample=2)
                subsample = 2
            else:
                shortcut = lambda x: x
                subsample = 1
            x = merge([shortcut(x), f(n, subsample)(x)], mode='sum')

    outputs, losses = decoder_end_block(x, label_sizes, nb_bits,
                                        activation=lambda: ELU())

    model = Model(input, list(outputs.values()))
    model.compile(optimizer, loss=list(losses.values()),
                  loss_weights={k: decoder_loss_weights(k) for k in losses.keys()})
    return model
Example #17
    def train_f_enc(self, steps_list, epoch=50):
        print("training f_enc")
        f_add0 = Sequential(name='f_add0')
        f_add0.add(self.f_enc)
        f_add0.add(Dense(FIELD_DEPTH))
        f_add0.add(Activation('softmax', name='softmax_add0'))

        f_add1 = Sequential(name='f_add1')
        f_add1.add(self.f_enc)
        f_add1.add(Dense(FIELD_DEPTH))
        f_add1.add(Activation('softmax', name='softmax_add1'))

        env_model = Model(self.f_enc.inputs, [f_add0.output, f_add1.output], name="env_model")
        env_model.compile(optimizer='adam', loss=['categorical_crossentropy']*2)

        for ep in range(epoch):
            losses = []
            for idx, steps_dict in enumerate(steps_list):
                prev = None
                for step in steps_dict['steps']:
                    x = self.convert_input(step.input)[:2]
                    env_values = step.input.env.reshape((4, -1))
                    in1 = np.clip(env_values[0].argmax() - 1, 0, 9)
                    in2 = np.clip(env_values[1].argmax() - 1, 0, 9)
                    carry = np.clip(env_values[2].argmax() - 1, 0, 9)
                    y_num = in1 + in2 + carry
                    now = (in1, in2, carry)
                    if prev == now:
                        continue
                    prev = now
                    y0 = to_one_hot_array((y_num %  10)+1, FIELD_DEPTH)
                    y1 = to_one_hot_array((y_num // 10)+1, FIELD_DEPTH)
                    y = [yy.reshape((self.batch_size, -1)) for yy in [y0, y1]]
                    loss = env_model.train_on_batch(x, y)
                    losses.append(loss)
            print("ep %3d: loss=%s" % (ep, np.average(losses)))
            if np.average(losses) < 1e-06:
                break
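The training loop above uses to_one_hot_array, which is assumed but not shown. A minimal sketch; note the later reshape((self.batch_size, -1)) implies the batch size divides the array length (e.g. batch size 1):

import numpy as np

def to_one_hot_array(value, size):
    # One-hot vector of length `size` with a 1 at index `value`.
    ret = np.zeros(size)
    ret[value] = 1
    return ret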
Example #18
def g():
    seq = Input(shape=(input_size, nb_chars))
    z = Input(shape=(z_size, ))
    z_rep = RepeatVector(input_size)(z)
    seq_and_z = merge([seq, z_rep], mode='concat', concat_axis=-1)
    fake_prob = sequential([
        LSTM(8),
        RepeatVector(output_size),
        LSTM(8, return_sequences=True),
        TimeDistributed(Dense(nb_chars, activation='softmax')),
    ])(seq_and_z)

    g = Model([z, seq], [fake_prob])
    return g
Example #19
 def load(self, config_path, weight_path):
     if os.path.exists(config_path) and os.path.exists(weight_path):
         logger.debug(f"loading model from {config_path}")
         with open(config_path, "rt") as f:
             self.model = Model.from_config(json.load(f))
         self.model.load_weights(weight_path)
         self.digest = self.fetch_digest(weight_path)
         logger.debug(f"loaded model digest = {self.digest}")
         return True
     else:
         logger.debug(
             f"model files does not exist at {config_path} and {weight_path}"
         )
         return False
Example #20
 def load(self, config_path, weight_path):
     if os.path.exists(config_path) and os.path.exists(weight_path):
         print(f"loading model from {config_path}")
         with open(config_path, "rt") as f:
             self.model = Model.from_config(json.load(f))
         self.model.load_weights(weight_path)
         self.graph = tf.get_default_graph()
         print(f"loaded model digest = {self.fetch_digest(weight_path)}")
         return True
     else:
         print(
             f"model files does not exist at {config_path} and {weight_path}"
         )
         return False
Example #21
    def __init__(self, g, d, m, g_optimizer, d_optimizer):
        self.g = g
        self.d = d
        self.m = m

        self.z, self.seq_input = self.g.inputs
        self.fake_prob, = self.g.outputs
        with trainable(m, False):
            m_input = merge([self.seq_input, self.fake_prob], mode='concat', concat_axis=1)
            self.m_realness = self.m(m_input)
            self.model_fit_g = Model([self.z, self.seq_input], [self.m_realness])
            self.model_fit_g.compile(g_optimizer, K.binary_crossentropy)

        self.d.compile(d_optimizer, loss=K.binary_crossentropy)
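The trainable context manager used above temporarily freezes m while model_fit_g is assembled. A sketch of one way to implement it, assuming per-layer trainability should be saved and restored:

from contextlib import contextmanager

@contextmanager
def trainable(model, value):
    saved = [layer.trainable for layer in model.layers]
    for layer in model.layers:
        layer.trainable = value
    try:
        yield
    finally:
        # Restore each layer's original flag.
        for layer, flag in zip(model.layers, saved):
            layer.trainable = flag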
Example #22
    def create_policy_value_net(self):
        """create the policy value network """
        in_x = network = Input((13,))

        # fully-connected layers
        network = Dense(64, activation='relu', kernel_regularizer=l2(self.l2_const))(network)
        network = Dense(64, activation='relu', kernel_regularizer=l2(self.l2_const))(network)
        network = Dense(32, activation='relu', kernel_regularizer=l2(self.l2_const))(network)
        network = Dense(32, activation='relu', kernel_regularizer=l2(self.l2_const))(network)


        self.policy_net = Dense(6, activation='softmax', kernel_regularizer=l2(self.l2_const))(network)
        # state value layers

        self.value_net = Dense(1, activation='tanh', kernel_regularizer=l2(self.l2_const))(network)

        self.model = Model(in_x, [self.policy_net, self.value_net])

        def policy_value(state_input):
            state_input_union = np.array(state_input)
            results = self.model.predict_on_batch(state_input_union)
            return results
        self.policy_value = policy_value
Example #23
def create_squeeze_net():
    inp = Input(shape=getInputDim())
    x = Conv2D(64, (3, 3), padding='same', activation='relu')(inp)
    x = MaxPooling2D((3, 3), strides=2)(x)
    x = create_fire_mod(x, 64, 128)
    x = MaxPooling2D((3, 3), strides=2)(x)
    x = create_fire_mod(x, 64, 128)
    x = MaxPooling2D((3, 3), strides=2)(x)

    x = Conv2D(32, (1, 1), activation='relu')(x)

    x = Flatten()(x)
    x = Dense(64, activation='relu')(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inp, x)
    model.compile(loss='binary_crossentropy',
                  optimizer='adagrad',
                  metrics=[
                      'binary_accuracy',
                  ])

    return model
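create_fire_mod is called above but not defined here. A sketch of a SqueezeNet-style fire module matching the call signature; the squeeze/expand wiring is the standard pattern, and the exact filter split is an assumption:

from keras.layers import Conv2D, concatenate

def create_fire_mod(x, squeeze_filters, expand_filters):
    s = Conv2D(squeeze_filters, (1, 1), activation='relu', padding='same')(x)
    e1 = Conv2D(expand_filters, (1, 1), activation='relu', padding='same')(s)
    e3 = Conv2D(expand_filters, (3, 3), activation='relu', padding='same')(s)
    # Concatenate the 1x1 and 3x3 expand paths along the channel axis.
    return concatenate([e1, e3], axis=-1)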
Example #24
def create_model():
    tokens = get_tokens()
    num_tokens = len(tokens) + 1
    input_data = Input(name='speech_data_input', shape=(500, 13))
    layer_dense_1 = Dense(256, activation="relu", use_bias=True, kernel_initializer='he_normal')(input_data)
    layer_dropout_1 = Dropout(0.4)(layer_dense_1)
    layer_dense_2 = Dense(512, activation="relu", use_bias=True, kernel_initializer='he_normal')(layer_dropout_1)
    layer_gru1 = GRU(512, return_sequences=True, kernel_initializer='he_normal', dropout=0.4)(layer_dense_2)
    layer_gru2 = GRU(512, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', dropout=0.4)(layer_gru1)
    layer_dense_3 = Dense(256, activation="relu", use_bias=True, kernel_initializer='he_normal')(layer_gru2)
    layer_dropout_2 = Dropout(0.4)(layer_dense_3)
    layer_dense_4 = Dense(num_tokens, activation="relu", use_bias=True, kernel_initializer='he_normal')(layer_dropout_2)
    output = Activation('softmax', name='Activation0')(layer_dense_4)
    #ctc
    labels = Input(name='speech_labels', shape=[70], dtype='int64')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    loss_out = Lambda(ctc_lambda, output_shape=(1,), name='ctc')([labels, output, input_length, label_length])
    model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)
    adad = Adadelta(lr=0.01, rho=0.95, epsilon=K.epsilon())
    model.compile(loss={'ctc': lambda y_true, output: output}, optimizer=adad)
    print("model compiled successful!")
    return model
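The ctc_lambda function wrapped by the Lambda layer above typically delegates to the Keras backend's CTC loss; a minimal sketch matching the argument order used in the call (K is the keras.backend module already used above):

def ctc_lambda(args):
    labels, y_pred, input_length, label_length = args
    # Keras backend CTC loss over the batch.
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)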
Example #25
 def simple_model(self, layers):
     print("building simple input")
     input = x = Input(shape=(12, 8, 8))
     x = BatchNormalization(name="in_2", axis=1)(x)
     x = Conv2D(padding='same',
                filters=64,
                kernel_size=4,
                use_bias=False,
                data_format="channels_first",
                name="in_1")(x)
     x = Activation("relu", name="in_3")(x)
     for i in range(layers):
         x = self.build_residuals(x, i)
     # out1 and out2 are assumed to be produced elsewhere (e.g. by build_residuals)
     return Model(input, [out1, out2], name="palmtree")
Example #26
def test_sparse_placeholder_predict():
    test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
    in1 = Input(shape=(3,))
    in2 = Input(shape=(3,), sparse=True)
    out1 = Dropout(0.5, name='dropout')(in1)
    out2 = Dense(4, name='dense_1')(in2)
    model = Model([in1, in2], [out1, out2])
    model.compile('rmsprop', 'mse')
    model.predict(test_inputs, batch_size=2)
Example #27
    def build(self):
        mc = self.config.model
        in_x = x = Input((28, 10, 9))
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_first_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-" + str(mc.cnn_first_filter_size) + "-" + str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for policy output
        x = Conv2D(filters=32, kernel_size=1, data_format="channels_first", use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg), name="policy_conv-1-2")(res_out)
        x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
        x = Activation("relu", name="policy_relu")(x)
        x = Flatten(name="policy_flatten")(x)
        policy_out = Dense(self.n_labels, kernel_regularizer=l2(mc.l2_reg), activation="softmax", name="policy_out")(x)

        # for value output
        x = Conv2D(filters=4, kernel_size=1, data_format="channels_first", use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg), name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu", name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size, kernel_regularizer=l2(mc.l2_reg), activation="relu", name="value_dense")(x)
        value_out = Dense(1, kernel_regularizer=l2(mc.l2_reg), activation="tanh", name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out], name="cchess_model")
        self.graph = tf.get_default_graph()
Example #28
def k_2pipeline_mlp_2outputs(yao_indices_dim, topics_dim, image_input, base_model, with_compile=True):

    # aux_mlp layer parameters follow cnn3_mlp_channel_2
    _aux_mlp_units_1 = 80
    _aux_mlp_activation_1 = 'relu'
    _aux_mlp_dropout_1 = 0.5

    # output layer parameters
    _output_units = yao_indices_dim
    _output_kernel_regularizer = None
    _output_activation = 'sigmoid'

    _aux_output_units = topics_dim
    _aux_output_activation = 'softmax'

    print('Build 2 deeper pipeline + MLP model...')

#     base_model.summary()

    pipeline_1 = base_model.output
    pipeline_2 = base_model.output
    concatenated = concatenate([pipeline_1, pipeline_2], axis=-1)
    gen_output = Dense(units=_output_units, kernel_regularizer=_output_kernel_regularizer,
                       activation=_output_activation, name='gen_output')(concatenated)
    # aux_output only gets features from the cnn2_mlp channel_2
    aux_output = Dense(units=_aux_output_units,
                       activation=_aux_output_activation, name='aux_output')(concatenated)
    pipeline2_mlp_2output_model = Model(inputs=image_input, outputs=[gen_output, aux_output])

    print('deeper_pipeline_model structure...')
    pipeline2_mlp_2output_model.summary()
    
    if with_compile:
        return double_output_compiler(pipeline2_mlp_2output_model, scaling_activation='binary')
    else:
        # ready to be joined into other frameworks like TensorFlow
        return pipeline2_mlp_2output_model
Example #29
 def __init__(self):
     self.project_dir = (os.path.dirname(__file__))
     self.data_path = os.path.join(self.project_dir, "./data/starCraft")
     self.result_work_path = os.path.join(self.project_dir,
                                          './result/DefogGAN')
     self.making_ing_path = os.path.join(self.project_dir,
                                         './result/DefogGAN')
     self.createFolder(self.result_work_path)
     is_multi_gpu = True
     self.is_validation_check = True
     self.save_weights = True
     #self.num_of_replay = 3500
     self.n_epochs = 1000
     self.batch_size = 512
     self.save_interval = 1000
     self.num_of_making_img = 5
     optimizer = Adam(0.0001, beta_1=0.5, beta_2=0.9)
     self.discriminator = self.build_discriminator()
     self.discriminator.compile(loss='binary_crossentropy',
                                optimizer=optimizer,
                                metrics=['accuracy'])
     self.generator = self.build_generator()
     fog_img = Input(shape=(82, 32, 32))
     gen_missing = self.generator(fog_img)
     self.discriminator.trainable = False
     valid = self.discriminator(gen_missing)
     self.combined = Model(fog_img, [gen_missing, valid])
     if is_multi_gpu:
         self.combined = multi_gpu_model(self.combined,
                                         gpus=self.get_count_of_gpu())
     weight = K.variable(
         np.array([0.75, 0.1875, 0.0468, 0.012, 0.003, 0.0007]))
     self.combined.compile(loss=[
         self.weighted_pyramidal_loss(weights=weight), 'binary_crossentropy'
     ],
                           loss_weights=[0.999, 0.001],
                           optimizer=optimizer)
Example #30
    def train_f_enc(self, steps_list, epoch=50):
        print("training f_enc")
        f_swap0 = Sequential(name='f_swap0')
        f_swap0.add(self.f_enc)
        f_swap0.add(Dense(FIELD_DEPTH))
        f_swap0.add(Activation('softmax', name='softmax_swap0'))

        f_swap1 = Sequential(name='f_swap1')
        f_swap1.add(self.f_enc)
        f_swap1.add(Dense(FIELD_DEPTH))
        f_swap1.add(Activation('softmax', name='softmax_swap1'))

        env_model = Model(self.f_enc.inputs, [f_swap0.output, f_swap1.output], name="env_model")
        env_model.compile(optimizer='adam', loss=['categorical_crossentropy']*2)

        for ep in range(epoch):
            losses = []
            for idx, steps_dict in enumerate(steps_list):
                prev = None
                for step in steps_dict['steps']:
                    x = self.convert_input(step.input)[:2]
                    env_values = step.input.env.reshape((3, -1))
                    p1 = np.clip(env_values[0].argmax() - 1, 0, 9)
                    p2 = np.clip(env_values[1].argmax() - 1, 0, 9)
                    p3 = np.clip(env_values[2].argmax() - 1, 0, 9)
                    now = (p1, p2, p3)
                    if prev == now:
                        continue
                    prev = now
                    y0 = to_one_hot_array(min(p1, p2), FIELD_DEPTH)
                    y1 = to_one_hot_array(max(p1, p2), FIELD_DEPTH)
                    y = [yy.reshape((self.batch_size, -1)) for yy in [y0, y1]]
                    loss = env_model.train_on_batch(x, y)
                    losses.append(loss)
            print("ep %3d: loss=%s" % (ep, np.average(losses)))
            if np.average(losses) < 1e-06:
                break
Example #31
def build_model(args):
    cnn_filter_num = args['cnn_filter_num']
    cnn_filter_size = args['cnn_filter_size']
    l2_reg = args['l2_reg']

    in_x = x = Input(args['input_dim'])

    # (batch, channels, height, width)
    x = Conv2D(filters=cnn_filter_num,
               kernel_size=cnn_filter_size,
               padding="same",
               data_format="channels_first",
               kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)

    for _ in range(args['res_layer_num']):
        x = _build_residual_block(args, x)

    res_out = x

    # for policy output
    x = Conv2D(filters=2,
               kernel_size=1,
               data_format="channels_first",
               kernel_regularizer=l2(l2_reg))(res_out)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Flatten()(x)
    policy_out = Dense(args['policy_dim'],
                       kernel_regularizer=l2(l2_reg),
                       activation="softmax",
                       name="policy")(x)

    # for value output
    x = Conv2D(filters=1,
               kernel_size=1,
               data_format="channels_first",
               kernel_regularizer=l2(l2_reg))(res_out)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Flatten()(x)
    x = Dense(256, kernel_regularizer=l2(l2_reg), activation="relu")(x)
    value_out = Dense(1,
                      kernel_regularizer=l2(l2_reg),
                      activation="tanh",
                      name="value")(x)

    return Model(in_x, [policy_out, value_out], name="model")
Example #32
def EEGNet(nb_classes=3,
           Chans=5,
           Samples=300,
           dropoutRate=0.25,
           kernLength=150,
           F1=2,
           D=2,
           F2=8,
           dropoutType='Dropout'):

    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    input1 = Input(shape=(1, Samples, Chans))

    block1 = Conv2D(F1, (1, kernLength),
                    padding='same',
                    input_shape=(1, Samples, Chans),
                    use_bias=False)(input1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = DepthwiseConv2D((Chans, 1),
                             use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.),
                             data_format='channels_first')(block1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 2), data_format='channels_first')(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, (1, 16), use_bias=False,
                             padding='same')(block1)
    block2 = BatchNormalization(axis=1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 1))(block2)
    block2 = dropoutType(dropoutRate)(block2)

    flatten = Flatten(name='flatten')(block2)

    dense = Dense(nb_classes, name='dense',
                  kernel_constraint=max_norm(0.25))(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
Example #33
    def build(self):  # build model
        image_in = Input((None, None, self.channels))

        conv = Conv2D(filters=self.cnn_filter_num,
                      kernel_size=self.cnn_kernel_size,
                      strides=(1, 1),
                      padding='same',
                      data_format='channels_last')(image_in)
        conv = Activation('relu')(conv)

        x = conv

        for layers in range(8):
            x = self._build_residual_block(x)

        conv_out = Conv2D(filters=self.channels,
                          kernel_size=self.cnn_kernel_size,
                          strides=(1, 1),
                          padding='same',
                          data_format='channels_last')(x)

        output = Add()([image_in, conv_out])

        self.model = Model(image_in, output, name='model')
Example #34
 def load(self, config_path, weight_path):
     if os.path.exists(config_path) and os.path.exists(weight_path):
         logger.debug(f"loading model from {config_path}")
         with open(config_path, "rt") as f:
             self.model = Model.from_config(json.load(f))
         self.model.load_weights(weight_path)
         self.graph = get_default_graph()
         # self.model._make_predict_function()
         self.digest = self.fetch_digest(weight_path)
         logger.debug(f"loaded model digest = {self.digest}")
         return True
     else:
         logger.debug(
             f"model files do not exist at {config_path} and {weight_path}")
         return False
Example #35
def decoder_baseline(label_sizes,
                     nb_bits=12,
                     data_shape=(1, 64, 64),
                     depth=1,
                     nb_filter=16,
                     optimizer='adam'):
    n = nb_filter
    input = Input(shape=data_shape)
    x = sequential([
        conv2d_block(n, depth=depth, pooling='max'),  # 32x32
        conv2d_block(2 * n, depth=depth, pooling='max'),  # 16x16
        conv2d_block(4 * n, depth=depth, pooling='max'),  # 8x8
        conv2d_block(8 * n, depth=depth, pooling='max'),  # 4x4
    ])(input)
    outputs, losses = decoder_end_block(x,
                                        label_sizes,
                                        nb_bits,
                                        activation=lambda: ELU())
    model = Model(input, list(outputs.values()))
    model.compile(
        optimizer,
        loss=list(losses.values()),
    )
    return model
Example #36
def get_unet_resnet(input_shape):
    resnet_base = ResNet50(input_shape=input_shape,
                           weights=None,
                           include_top=False)

    # if args.show_summary:
    #     resnet_base.summary()

    for l in resnet_base.layers:
        l.trainable = True
    conv1 = resnet_base.get_layer("activation_1").output
    conv2 = resnet_base.get_layer("activation_10").output
    conv3 = resnet_base.get_layer("activation_22").output
    conv4 = resnet_base.get_layer("activation_40").output
    conv5 = resnet_base.get_layer("activation_49").output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 192, "conv7_1")
    conv7 = conv_block_simple(conv7, 192, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    vgg = vgg16.VGG16(input_shape=input_shape,
                      input_tensor=resnet_base.input,
                      weights=None,
                      include_top=False)
    for l in vgg.layers:
        l.trainable = False
    vgg_first_conv = vgg.get_layer("block1_conv2").output
    up10 = concatenate(
        [UpSampling2D()(conv9), resnet_base.input, vgg_first_conv], axis=-1)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(resnet_base.input, x)

    return model
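conv_block_simple above is a small named conv/BN/ReLU unit used throughout the decoder. A sketch consistent with how it is called; the prefix-based layer naming is an assumption:

from keras.layers import Conv2D, BatchNormalization, Activation

def conv_block_simple(prev, num_filters, prefix):
    conv = Conv2D(num_filters, (3, 3), padding="same",
                  kernel_initializer="he_normal", name=prefix + "_conv")(prev)
    conv = BatchNormalization(name=prefix + "_bn")(conv)
    conv = Activation('relu', name=prefix + "_activation")(conv)
    return conv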
Example #37
def test_sparse_input_target_evaluate():
    test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
    test_outputs = [sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)]
    in1 = Input(shape=(3,))
    in2 = Input(shape=(3,))
    out1 = Dropout(0.5, name='dropout')(in1)
    out2 = Dense(4, name='dense_1')(in2)
    model = Model([in1, in2], [out1, out2])
    model.compile('rmsprop', 'mse')
    model.evaluate(test_inputs, test_outputs, batch_size=2)
Example #38
def test_sparse_placeholder_fit():
    test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
    test_outputs = [sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)]
    in1 = Input(shape=(3,))
    in2 = Input(shape=(3,), sparse=True)
    out1 = Dropout(0.5, name='dropout')(in1)
    out2 = Dense(4, name='dense_1')(in2)
    model = Model([in1, in2], [out1, out2])
    model.compile('rmsprop', 'mse')
    model.fit(test_inputs, test_outputs, epochs=1, batch_size=2, validation_split=0.2)
Example #39
    def build_model(self):
        """
        Build the Keras model
        """
        model_config = self.config.move_probability_model
        input_layer = model = Input((model_config.input_size,))

        # Build dense layers
        for index, size in enumerate(model_config.dense_layer_sizes):
            model = self._build_dense_layer(model, size, index)

        # Move Probability Output
        model_out = Dense(model_config.num_candidate_moves, kernel_regularizer=l2(model_config.l2_reg),
                          activation='softmax', name='output')(model)

        self.model = Model(input_layer, model_out, name='chess_move_model')
Example #40
def get_vgg_7conv(input_shape):
    img_input = Input(input_shape)
    vgg16_base = VGG16(input_tensor=img_input, include_top=False)
    for l in vgg16_base.layers:
        l.trainable = True
    conv1 = vgg16_base.get_layer("block1_conv2").output
    conv2 = vgg16_base.get_layer("block2_conv2").output
    conv3 = vgg16_base.get_layer("block3_conv3").output
    pool3 = vgg16_base.get_layer("block3_pool").output

    conv4 = Conv2D(384, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block4_conv1")(pool3)
    conv4 = Conv2D(384, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block4_conv2")(conv4)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(conv4)

    conv5 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block5_conv1")(pool4)
    conv5 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block5_conv2")(conv5)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(conv5)

    conv6 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block6_conv1")(pool5)
    conv6 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block6_conv2")(conv6)
    pool6 = MaxPooling2D((2, 2), strides=(2, 2), name='block6_pool')(conv6)

    conv7 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block7_conv1")(pool6)
    conv7 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block7_conv2")(conv7)

    up8 = concatenate([Conv2DTranspose(384, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv7), conv6], axis=3)
    conv8 = Conv2D(384, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up8)

    up9 = concatenate([Conv2DTranspose(256, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv8), conv5], axis=3)
    conv9 = Conv2D(256, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up9)

    up10 = concatenate([Conv2DTranspose(192, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv9), conv4], axis=3)
    conv10 = Conv2D(192, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up10)

    up11 = concatenate([Conv2DTranspose(128, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv10), conv3], axis=3)
    conv11 = Conv2D(128, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up11)

    up12 = concatenate([Conv2DTranspose(64, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv11), conv2], axis=3)
    conv12 = Conv2D(64, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up12)

    up13 = concatenate([Conv2DTranspose(32, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv12), conv1], axis=3)
    conv13 = Conv2D(32, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up13)

    conv13 = Conv2D(1, (1, 1))(conv13)
    conv13 = Activation("sigmoid")(conv13)
    model = Model(img_input, conv13)
    return model
Example #41
def build_model():
    data_shape = (300, 5)  # (steps, channels)

    input_main = Input((300, 5))
    block1 = Conv1D(25,
                    15,
                    data_format='channels_last',
                    input_shape=data_shape)(input_main)

    block1 = Conv1D(25, 5, data_format='channels_first')(block1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = Activation('elu')(block1)
    block1 = MaxPooling1D(pool_size=2, strides=2)(block1)
    block1 = Dropout(0.5)(block1)

    block2 = Conv1D(50, 5, data_format='channels_first')(block1)
    block2 = BatchNormalization(axis=1)(block2)
    block2 = Activation('elu')(block2)
    block2 = MaxPooling1D(pool_size=2, strides=2)(block2)
    block2 = Dropout(0.5)(block2)

    block3 = Conv1D(100, 5, data_format='channels_first')(block2)
    block3 = BatchNormalization(axis=1)(block3)
    block3 = Activation('elu')(block3)
    block3 = MaxPooling1D(pool_size=2, strides=2)(block3)
    block3 = Dropout(0.5)(block3)

    block4 = Conv1D(200, 5, data_format='channels_first')(block3)
    block4 = BatchNormalization(axis=1)(block4)
    block4 = Activation('elu')(block4)
    block4 = MaxPooling1D(pool_size=2, strides=2)(block4)
    block4 = Dropout(0.5)(block4)

    flatten = Flatten()(block4)

    dense = Dense(3, kernel_constraint=max_norm(0.5))(flatten)
    softmax = Activation('softmax')(dense)
    model = Model(inputs=input_main, outputs=softmax)

    start = time.time()
    model.compile(loss="categorical_crossentropy",
                  optimizer=optimizers.Adagrad(),
                  metrics=['accuracy'])
    print("Compilation time: ", time.time(), '-', start)

    return model
Example #42
 def load(self, config_path: str, weight_path: str) -> bool:
     if os.path.exists(weight_path):  # os.path.exists(config_path) and
         logger.debug(f"loading model from {config_path}")
         with open(config_path, "rt") as f:
             self.model = Model.from_config(json.load(f))
         self.model.load_weights(weight_path)
         self.model.compile(
             loss='mse', optimizer=Adam(lr=self.config.model.learning_rate))
         self.model.summary()
         self.digest = self.fetch_digest(weight_path)
         logger.debug(f"loaded model digest = {self.digest}")
         return True
     else:
         logger.debug(
             f"model files does not exist at {config_path} and {weight_path}"
         )
         return False
Example #43
def test_sparse_input_validation_split():
    test_input = sparse.random(6, 3, density=0.25).tocsr()
    in1 = Input(shape=(3, ), sparse=True)
    out1 = Dense(4)(in1)
    test_output = np.random.random((6, 4))
    model = Model(in1, out1)
    model.compile('rmsprop', 'mse')
    model.fit(test_input,
              test_output,
              epochs=1,
              batch_size=2,
              validation_split=0.2)
Example #44
def get_model2(input_shape, num_classes, dp):
    x = input_1 = Input(shape=input_shape)
    x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    # x = Dropout(rate=0.5)(x)
    x = Dropout(rate=dp)(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(rate=dp)(x)
    x = Flatten()(x)
    # x = Dense(units=128)(x)
    # x = Dropout(rate=dp)(x)
    x = Dense(units=num_classes)(x)
    x = Activation(activation='softmax')(x)
    model = Model(inputs=[input_1], outputs=[x])
    return model
Example #45
    def Load(self, configPath, weightPath):

        if os.access(configPath, os.F_OK):
            while not os.access(configPath, os.W_OK) or not os.access(
                    weightPath, os.W_OK):
                time.sleep(0.001)

        while True:
            try:
                with open(configPath, "rt") as f:
                    config = json.load(f)
                    self.OptimizeCount = config["OptimizeCount"]
                    self.TimeLimit = config["TimeLimit"]
                    self.Model = Model.from_config(config)
                    self.Model.load_weights(weightPath)
                break
            except Exception:
                time.sleep(0.1)
Example #46
def build_model(input_shape):
    input_tensor = Input(shape=input_shape)
    base = efn.EfficientNetB0(input_tensor=input_tensor, weights='imagenet', include_top=False)

    for l in base.layers:
        l.trainable = True

    conv = base.output

    x = GlobalAveragePooling2D(name='pool1')(conv)
    x = BatchNormalization()(x)
    x = Dense(512, name='fc1')(x)
    x = Activation('relu', name='relu1')(x)
    x = BatchNormalization()(x)
    #x = Dropout(0.5)(x)
    x = Dense(4, name='fc3')(x)
    x = Activation('sigmoid', name='sigmoid')(x)
    model = Model(inputs=[base.input], outputs=[x])
    return model
Example #47
def build_model():
    """
    builds full keras model and returns it
    """
    in_x = x = Input((1, 8, 8))

    # (batch, channels, height, width)
    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_first_filter_size, padding="same", data_format="channels_first",
               use_bias=False, kernel_regularizer=l2(l2_reg),
               name="input_conv-" + str(cnn_first_filter_size) + "-" + str(cnn_filter_num))(x)
    x = BatchNormalization(axis=1, name="input_batchnorm")(x)
    x = Activation("relu", name="input_relu")(x)

    for i in range(res_layer_num):
        x = _build_residual_block(x, i + 1)

    res_out = x

    # for policy output
    x = Conv2D(filters=2, kernel_size=1, data_format="channels_first", use_bias=False, kernel_regularizer=l2(l2_reg),
               name="policy_conv-1-2")(res_out)

    x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
    x = Activation("relu", name="policy_relu")(x)
    x = Flatten(name="policy_flatten")(x)

    # no output for 'pass'
    policy_out = Dense(n_labels, kernel_regularizer=l2(l2_reg), activation="softmax", name="policy_out")(x)

    # for value output
    x = Conv2D(filters=4, kernel_size=1, data_format="channels_first", use_bias=False, kernel_regularizer=l2(l2_reg),
               name="value_conv-1-4")(res_out)

    x = BatchNormalization(axis=1, name="value_batchnorm")(x)
    x = Activation("relu", name="value_relu")(x)
    x = Flatten(name="value_flatten")(x)
    x = Dense(value_fc_size, kernel_regularizer=l2(l2_reg), activation="relu", name="value_dense")(x)

    value_out = Dense(1, kernel_regularizer=l2(l2_reg), activation="tanh", name="value_out")(x)

    model = Model(in_x, [policy_out, value_out], name="hex_model")

    sgd = optimizers.SGD(lr=learning_rate, momentum=momentum)

    losses = ['categorical_crossentropy', 'mean_squared_error']

    model.compile(loss=losses, optimizer='adam', metrics=['accuracy', 'mae'])

    model.summary()
    return model
Example #48
    def build(self):
        """
        Builds the full Keras model and stores it in self.model.
        """
        in_x = x = Input((1, 19, 19))
        x = self.Input_Conv_1(x)
        for i in range(20):
            x = self.Inception_blocks(x, 64, i+1)
        x = AveragePooling2D(pool_size=(7, 7))(x)
        x = Dropout(0.5, name='dropout-1')(x)
        x = Flatten()(x)
        # x = Dense(1024, activation='softmax')(x)
        x = Dense(34, activation='softmax')(x)
        model = Model(inputs=in_x, outputs=x)
        # model.summary()

        # x_test = np.array([[[1,2],[2,3],[3,4],[4,5]]])
        # print (model.predict(x_test))
        # plot_model(model, to_file='Resnet_simple_v1.png', show_shapes=True)
        return model
Example #49
    def build(self):
        dim_data = self.size_of_input_data_dim
        nb_time_step = self.size_of_input_timesteps
        financial_time_series_input = Input(shape=(nb_time_step, dim_data), name='x1')
        lstm_layer_1 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                            W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                            return_sequences=True, name='lstm_layer1')
        lstm_layer_21 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss1')
        lstm_layer_22 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss2')
        lstm_layer_23 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss3')

        lstm_layer_24 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss4')

        lstm_layer_25 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss5')
        h1 = lstm_layer_1(financial_time_series_input)
        h21 = lstm_layer_21(h1)
        h22 = lstm_layer_22(h1)
        h23 = lstm_layer_23(h1)
        h24 = lstm_layer_24(h1)
        h25 = lstm_layer_25(h1)
        time_series_predictions1 = TimeDistributed(Dense(1), name="p1")(h21)  # custom 1
        time_series_predictions2 = TimeDistributed(Dense(1), name="p2")(h22)  # custom 2
        time_series_predictions3 = TimeDistributed(Dense(1), name="p3")(h23)  # mse
        time_series_predictions4 = TimeDistributed(Dense(1, activation='sigmoid'), name="p4")(h24)  # logloss
        time_series_predictions5 = TimeDistributed(Dense(nb_labels, activation='softmax'), name="p5")(h25)  # cross
        self.model = Model(input=financial_time_series_input,
                           output=[time_series_predictions1, time_series_predictions2,
                                   time_series_predictions3, time_series_predictions4,
                                   time_series_predictions5],
                           name="multi-task deep rnn for financial time series forecasting")
        plot(self.model, to_file='model.png')
Example #50
    def _build_generator_given_z(self):
        z = Input(shape=(self.z_dim,), name='z')

        z_bits = Subtensor(*self.pos_z_bits, axis=1)(z)
        z_labels = Subtensor(*self.pos_z_labels, axis=1)(z)
        z_offset = Subtensor(*self.pos_z_offset, axis=1)(z)
        bits = ThresholdBits()(z_bits)
        nb_labels_without_bits = self.labels_shape[0] - self.nb_bits
        generated_labels = get_label_generator(
            z_labels, self.generator_units, nb_output_units=nb_labels_without_bits)

        labels_normed = NormSinCosAngle(0)(generated_labels)
        labels = concat([bits, labels_normed], name='labels')
        fake = self.generator_given_z_and_labels([z_offset, labels])
        self.generator_given_z = Model([z], [fake])

        sample_tensors = self.sample_generator_given_z_and_labels([z_offset, labels])
        sample_tensors = [name_tensor(t, n)
                          for t, n in zip(sample_tensors,
                                          self.sample_generator_given_z_and_labels.output_names)]
        self.sample_generator_given_z_output_names = ['labels'] + \
            self.sample_generator_given_z_and_labels_output_names
        self.sample_generator_given_z = Model([z], [labels] + sample_tensors)
Example #51
def test_conv2d_block():
    x = Input(shape=(1, 8, 8))
    y = sequential(
        conv2d_block(4)
    )(x)
    model = Model(x, y)
    assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 8, 8)

    x = Input(shape=(1, 8, 8))
    y = sequential(
        conv2d_block(4, pooling='avg')
    )(x)
    model = Model(x, y)
    assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 4, 4)

    x = Input(shape=(1, 8, 8))
    y = sequential(
        conv2d_block(4, up=True)
    )(x)
    model = Model(x, y)
    assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 16, 16)
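The shape assertions in this test pin down conv2d_block fairly tightly: depth conv stages preserve spatial size, pooling halves it, and up=True doubles it. A sketch in the same Keras 1 style as Example #16; layer choices beyond the asserted shapes are assumptions:

from keras.layers import Convolution2D, BatchNormalization, MaxPooling2D, AveragePooling2D, UpSampling2D
from keras.layers.advanced_activations import ELU

def conv2d_block(n, depth=1, pooling=None, up=False):
    layers = []
    for _ in range(depth):
        layers += [
            Convolution2D(n, 3, 3, border_mode='same'),
            BatchNormalization(mode=0, axis=1),
            ELU(),
        ]
    if pooling == 'max':
        layers.append(MaxPooling2D((2, 2)))      # e.g. 8x8 -> 4x4
    elif pooling == 'avg':
        layers.append(AveragePooling2D((2, 2)))
    if up:
        layers.append(UpSampling2D((2, 2)))      # e.g. 8x8 -> 16x16
    return layers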
Example #52
def simple_gan():
    z = Input(batch_shape=simple_gan_z_shape, name='z')
    generator = sequential([
        Dense(4*simple_gan_nb_z, activation='relu', name='g1'),
        Dense(4*simple_gan_nb_z, activation='relu', name='g2'),
        Dense(simple_gan_nb_out, name='g_loss'),
    ])(z)

    d_input = Input(batch_shape=simple_gan_real_shape, name='data')

    discriminator = sequential([
        Dense(400, input_dim=2, name='d1'),
        LeakyReLU(0.3),
        Dense(400, name='d2'),
        LeakyReLU(0.3),
        Dense(1, activation='sigmoid', name='d_loss')
    ])(d_input)
    g = Model(z, generator)
    g.compile(Adam(lr=0.0002, beta_1=0.5), {'g_loss': 'binary_crossentropy'})
    d = Model(d_input, discriminator)
    d.compile(Adam(lr=0.0002, beta_1=0.5), {'d_loss': 'binary_crossentropy'})
    return GAN(g, d)
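simple_gan depends on module-level constants that are not shown in this listing. Hypothetical values consistent with the shapes it uses (the discriminator's input_dim=2 implies 2-D data points):

simple_gan_batch_size = 64  # assumed
simple_gan_nb_z = 8         # assumed latent width
simple_gan_nb_out = 2       # matches the discriminator's 2-D input
simple_gan_z_shape = (simple_gan_batch_size, simple_gan_nb_z)
simple_gan_real_shape = (simple_gan_batch_size, simple_gan_nb_out)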
Example #53
0
def test_model_methods():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    # test train_on_batch
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               {'dense_1': output_a_np, 'dropout': output_b_np})

    # test fit
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], nb_epoch=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np], nb_epoch=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    nb_epoch=1, batch_size=4)

    # test validation_split
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    nb_epoch=1, batch_size=4, validation_split=0.5)

    # test validation data
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4,
                    validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    nb_epoch=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, {'dense_1': output_a_np, 'dropout': output_b_np}))

    # test_on_batch
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              {'dense_1': output_a_np, 'dropout': output_b_np})

    # predict_on_batch
    out = model.predict_on_batch([input_a_np, input_b_np])
    out = model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})

    # predict, evaluate
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # with sample_weight
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    sample_weight = [None, np.random.random((10,))]
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               sample_weight=sample_weight)

    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np],
                              sample_weight=sample_weight)

    # test accuracy metric
    model.compile(optimizer, loss, metrics=['acc'],
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 5
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 5

    # this should also work
    model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # and this as well
    model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # test starting from non-zero initial epoch
    trained_epochs = []

    def on_epoch_begin(epoch, logs):
        trained_epochs.append(epoch)
    tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin)
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], nb_epoch=5, batch_size=4,
                    initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test starting from non-zero initial epoch for generator too
    trained_epochs = []

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])
    out = model.fit_generator(gen_data(4), samples_per_epoch=10, nb_epoch=5,
                              initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test with a custom metric function
    mse = lambda y_true, y_pred: K.mean(K.pow(y_true - y_pred, 2))

    def mse_powers(y_true, y_pred):
        m = mse(y_true, y_pred)
        return {
            'mse_squared': K.pow(m, 2),
            'mse_cubed': K.pow(m, 3)
        }

    model.compile(optimizer, loss, metrics=[mse, mse_powers],
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out_len = 1 + 2 * 4  # total loss + 2 outputs * (loss + 3 metrics)
    assert len(out) == out_len
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == out_len

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.fit([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4, nb_epoch=1)
    out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)
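The length assertions in this test follow from how Keras flattens compiled metrics: train_on_batch and test_on_batch return [total_loss] + [one loss per output] + [metric values]. With two outputs, metrics=['acc'] yields 1 + 2 + 2 = 5 values; scoping the metric to a single output via {'dense_1': 'acc'} drops one accuracy, giving 1 + 2 + 1 = 4; and the custom mse plus the dict-returning mse_powers contribute three metric values per output (mse, mse_squared, mse_cubed), hence 1 + 2 * (1 + 3) = 9.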
Example #54
0
def test_trainable_weights_count_consistency():
    """Tests the trainable weights consistency check of Model.

    This verifies that a warning is shown if model.trainable is modified
    and the model is summarized/run without a new call to .compile()

    Reproduce issue #8121
    """
    a = Input(shape=(3,), name='input_a')
    model1 = Model(inputs=a, outputs=Dense(1)(a))

    model1.trainable = False
    b = Input(shape=(3,), name='input_b')
    y = model1(b)
    model2 = Model(inputs=b, outputs=Dense(1)(y))

    model2.compile(optimizer='adam', loss='mse')

    model1.trainable = True

    # Should warn on .summary()
    with pytest.warns(UserWarning) as w:
        model2.summary()
    warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when trainable is modified without .compile.'

    # And on .fit()
    with pytest.warns(UserWarning) as w:
        model2.fit(x=np.zeros((5, 3)), y=np.zeros((5, 1)))
    warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when trainable is modified without .compile.'

    # And shouldn't warn if we recompile
    model2.compile(optimizer='adam', loss='mse')
    with pytest.warns(None) as w:
        model2.summary()
    assert len(w) == 0, "Warning raised even when .compile() is called after modifying .trainable"
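The remedy the warning points at is simply to recompile after toggling trainable, since compile() is what snapshots the set of trainable weights. A minimal sketch of the safe pattern, reusing the models from this test:

model1.trainable = False
model2.compile(optimizer='adam', loss='mse')  # freeze is captured here
# ... train with model1 frozen ...
model1.trainable = True
model2.compile(optimizer='adam', loss='mse')  # recompile so the change takes effect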
Example #55
0
def test_model_methods():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    input_a_df = pd.DataFrame(input_a_np)
    input_b_df = pd.DataFrame(input_b_np)

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    output_a_df = pd.DataFrame(output_a_np)
    output_b_df = pd.DataFrame(output_b_np)

    # training/testing doesn't work before compiling.
    with pytest.raises(RuntimeError):
        model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np])

    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    # test train_on_batch
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               {'dense_1': output_a_np, 'dropout': output_b_np})
    out = model.train_on_batch([input_a_df, input_b_df],
                               [output_a_df, output_b_df])

    # test fit
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4)
    out = model.fit([input_a_df, input_b_df],
                    [output_a_df, output_b_df], epochs=1, batch_size=4)

    # test validation_split
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)

    # test validation data
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4,
                    validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=(
                        {'input_a': input_a_np, 'input_b': input_b_np},
                        {'dense_1': output_a_np, 'dropout': output_b_np}))

    # test_on_batch
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              {'dense_1': output_a_np, 'dropout': output_b_np})
    out = model.test_on_batch([input_a_df, input_b_df],
                              [output_a_df, output_b_df])

    # predict_on_batch
    out = model.predict_on_batch([input_a_np, input_b_np])
    out = model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})
    out = model.predict_on_batch([input_a_df, input_b_df])

    # predict, evaluate
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
    out = model.evaluate([input_a_df, input_b_df], [output_a_df, output_b_df], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)
    out = model.predict([input_a_df, input_b_df], batch_size=4)

    # with sample_weight
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    sample_weight = [None, np.random.random((10,))]
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               sample_weight=sample_weight)

    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np],
                              sample_weight=sample_weight)

    # test accuracy metric
    model.compile(optimizer, loss, metrics=['acc'],
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 5
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 5

    # this should also work
    model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # and this as well
    model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # test starting from non-zero initial epoch
    trained_epochs = []

    # define tracer callback
    def on_epoch_begin(epoch, logs):
        trained_epochs.append(epoch)

    tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin)

    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=5, batch_size=4,
                    initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test starting from non-zero initial epoch for generator too
    trained_epochs = []

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])

    out = model.fit_generator(gen_data(4), steps_per_epoch=3, epochs=5,
                              initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test with a custom metric function
    def mse(y_true, y_pred):
        return K.mean(K.pow(y_true - y_pred, 2))

    model.compile(optimizer, loss, metrics=[mse],
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out_len = 1 + 2 * (1 + 1)  # total loss + 2 outputs * (loss + metric)
    assert len(out) == out_len
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == out_len

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.fit([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4, epochs=1)
    out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # empty batch
    with pytest.raises(ValueError):
        def gen_data():
            while True:
                yield (np.asarray([]), np.asarray([]))
        out = model.evaluate_generator(gen_data(), steps=1)

    # x is not a list of numpy arrays.
    with pytest.raises(ValueError):
        out = model.predict([None])

    # x does not match _feed_input_names.
    with pytest.raises(ValueError):
        out = model.predict([input_a_np, None, input_b_np])
    with pytest.raises(ValueError):
        out = model.predict([None, input_a_np, input_b_np])

    # all input/output/weight arrays should have the same number of samples.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np[:2]],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np[:2]],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=[sample_weight[1], sample_weight[1][:2]])

    # `sample_weight` is neither a dict nor a list.
    with pytest.raises(TypeError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=tuple(sample_weight))

    # `validation_data` is neither a 2-tuple nor a 3-tuple.
    with pytest.raises(ValueError):
        out = model.fit([input_a_np, input_b_np],
                        [output_a_np, output_b_np],
                        epochs=1, batch_size=4,
                        validation_data=([input_a_np, input_b_np],))

    # `loss` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=['mse', 'mae', 'mape'])

    # `loss_weights` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights={'lstm': 0.5})

    # `loss_weights` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights=[0.5])

    # `loss_weights` is invalid type.
    with pytest.raises(TypeError):
        model.compile(optimizer, loss='mse', loss_weights=(0.5, 0.5))

    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', sample_weight_mode={'lstm': 'temporal'})

    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', sample_weight_mode=['temporal'])

    # `sample_weight_mode` matches output_names partially.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', sample_weight_mode={'dense_1': 'temporal'})

    # `loss` does not exist.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=[])

    model.compile(optimizer, loss=['mse', 'mae'])
    model.compile(optimizer, loss='mse', loss_weights={'dense_1': 0.2, 'dropout': 0.8})
    model.compile(optimizer, loss='mse', loss_weights=[0.2, 0.8])

    # the rank of weight arrays should be 1.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=[None, np.random.random((10, 20, 30))])

    model.compile(optimizer, loss='mse', sample_weight_mode={'dense_1': None, 'dropout': 'temporal'})
    model.compile(optimizer, loss='mse', sample_weight_mode=[None, 'temporal'])

    # output arrays must be at least 3D when sample_weight_mode is 'temporal'.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)

    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)
    trained_epochs = []
    out = model.fit_generator(generator=RandomSequence(3), steps_per_epoch=12, epochs=5,
                              initial_epoch=0, validation_data=RandomSequence(4),
                              validation_steps=12, callbacks=[tracker_cb])
    assert trained_epochs == [0, 1, 2, 3, 4]
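RandomSequence is a test helper not shown in this listing. A plausible stand-in, assuming the keras.utils.Sequence API and the two-input/two-output shapes this model expects:

import numpy as np
from keras.utils import Sequence

class RandomSequence(Sequence):
    """Hypothetical stand-in for the helper used above."""

    def __init__(self, batch_size, sequence_length=12):
        self.batch_size = batch_size
        self.sequence_length = sequence_length

    def __len__(self):
        return self.sequence_length

    def __getitem__(self, idx):
        b = self.batch_size
        return ([np.random.random((b, 3)), np.random.random((b, 3))],
                [np.random.random((b, 4)), np.random.random((b, 3))])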
Example #56
0
def test_model_custom_target_tensors():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    y = K.placeholder([10, 4], name='y')
    y1 = K.placeholder([10, 3], name='y1')
    y2 = K.placeholder([7, 5], name='y2')
    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]

    # test list of target tensors
    with pytest.raises(ValueError):
        model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                      sample_weight_mode=None, target_tensors=[y, y1, y2])
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None, target_tensors=[y, y1])
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               {y: np.random.random((10, 4)),
                                y1: np.random.random((10, 3))})
    # test dictionary of target_tensors
    with pytest.raises(ValueError):
        model.compile(optimizer, loss,
                      metrics=[],
                      loss_weights=loss_weights,
                      sample_weight_mode=None,
                      target_tensors={'does_not_exist': y2})
    # test dictionary of target_tensors
    model.compile(optimizer, loss,
                  metrics=[],
                  loss_weights=loss_weights,
                  sample_weight_mode=None,
                  target_tensors={'dense_1': y, 'dropout': y1})
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               {y: np.random.random((10, 4)),
                                y1: np.random.random((10, 3))})

    if K.backend() == 'tensorflow':
        import tensorflow as tf
        # test with custom TF placeholder as target
        pl_target_a = tf.placeholder('float32', shape=(None, 4))
        model.compile(optimizer='rmsprop', loss='mse',
                      target_tensors={'dense_1': pl_target_a})
        model.train_on_batch([input_a_np, input_b_np],
                             [output_a_np, output_b_np])
Example #57
0
def test_model_with_external_loss():
    # None loss, only regularization loss.
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1',
                kernel_regularizer='l1',
                bias_regularizer='l2')(a)
    dp = Dropout(0.5, name='dropout')
    a_3 = dp(a_2)

    model = Model(a, [a_2, a_3])

    optimizer = 'rmsprop'
    loss = None
    model.compile(optimizer, loss, metrics=['mae'])

    input_a_np = np.random.random((10, 3))

    # test train_on_batch
    out = model.train_on_batch(input_a_np, None)
    out = model.test_on_batch(input_a_np, None)
    # fit
    out = model.fit(input_a_np, None)
    # evaluate
    out = model.evaluate(input_a_np, None)

    # No dropout, external loss.
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    a_3 = Dense(4, name='dense_2')(a)

    model = Model(a, [a_2, a_3])
    model.add_loss(K.mean(a_3 + a_2))

    optimizer = 'rmsprop'
    loss = None
    model.compile(optimizer, loss, metrics=['mae'])

    # test train_on_batch
    out = model.train_on_batch(input_a_np, None)
    out = model.test_on_batch(input_a_np, None)
    # fit
    out = model.fit(input_a_np, None)
    # evaluate
    out = model.evaluate(input_a_np, None)

    # Test fit with no external data at all.
    if K.backend() == 'tensorflow':
        import tensorflow as tf

        a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
        a_2 = Dense(4, name='dense_1')(a)
        a_2 = Dropout(0.5, name='dropout')(a_2)
        model = Model(a, a_2)
        model.add_loss(K.mean(a_2))

        model.compile(optimizer='rmsprop',
                      loss=None,
                      metrics=['mean_squared_error'])

        # test train_on_batch
        out = model.train_on_batch(None, None)
        out = model.test_on_batch(None, None)
        out = model.predict_on_batch(None)

        # test fit
        with pytest.raises(ValueError):
            out = model.fit(None, None, epochs=1, batch_size=10)
        out = model.fit(None, None, epochs=1, steps_per_epoch=1)

        # test fit with validation data
        with pytest.raises(ValueError):
            out = model.fit(None, None,
                            epochs=1,
                            steps_per_epoch=None,
                            validation_steps=2)
        out = model.fit(None, None,
                        epochs=1,
                        steps_per_epoch=2,
                        validation_steps=2)

        # test evaluate
        with pytest.raises(ValueError):
            out = model.evaluate(None, None, batch_size=10)
        out = model.evaluate(None, None, steps=3)

        # test predict
        with pytest.raises(ValueError):
            out = model.predict(None, batch_size=10)
        out = model.predict(None, steps=3)
        assert out.shape == (10 * 3, 4)

        # Test multi-output model without external data.
        a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
        a_1 = Dense(4, name='dense_1')(a)
        a_2 = Dropout(0.5, name='dropout')(a_1)
        model = Model(a, [a_1, a_2])
        model.add_loss(K.mean(a_2))
        model.compile(optimizer='rmsprop',
                      loss=None,
                      metrics=['mean_squared_error'])

        # test train_on_batch
        out = model.train_on_batch(None, None)
        out = model.test_on_batch(None, None)
        out = model.predict_on_batch(None)

        # test fit
        with pytest.raises(ValueError):
            out = model.fit(None, None, epochs=1, batch_size=10)
        out = model.fit(None, None, epochs=1, steps_per_epoch=1)

        # test fit with validation data
        with pytest.raises(ValueError):
            out = model.fit(None, None,
                            epochs=1,
                            steps_per_epoch=None,
                            validation_steps=2)
        out = model.fit(None, None,
                        epochs=1,
                        steps_per_epoch=2,
                        validation_steps=2)

        # test evaluate
        with pytest.raises(ValueError):
            out = model.evaluate(None, None, batch_size=10)
        out = model.evaluate(None, None, steps=3)

        # test predict
        with pytest.raises(ValueError):
            out = model.predict(None, batch_size=10)
        out = model.predict(None, steps=3)
        assert len(out) == 2
        assert out[0].shape == (10 * 3, 4)
        assert out[1].shape == (10 * 3, 4)
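Note the shape assertions at the end: with a fed tf.Variable holding 10 samples and steps=3, predict runs the graph three times and concatenates the results, so each output carries 10 * 3 = 30 rows.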
Example #58
0
def test_model_with_partial_loss():
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    a_3 = dp(a_2)
    model = Model(a, [a_2, a_3])

    optimizer = 'rmsprop'
    loss = {'dropout': 'mse'}
    model.compile(optimizer, loss, metrics=['mae'])

    input_a_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))

    # test train_on_batch
    out = model.train_on_batch(input_a_np, output_a_np)
    out = model.test_on_batch(input_a_np, output_a_np)
    # fit
    out = model.fit(input_a_np, [output_a_np])
    # evaluate
    out = model.evaluate(input_a_np, [output_a_np])

    # Same without dropout.
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    a_3 = Dense(4, name='dense_2')(a_2)
    model = Model(a, [a_2, a_3])

    optimizer = 'rmsprop'
    loss = {'dense_2': 'mse'}
    model.compile(optimizer, loss, metrics={'dense_1': 'mae'})

    # test train_on_batch
    out = model.train_on_batch(input_a_np, output_a_np)
    out = model.test_on_batch(input_a_np, output_a_np)
    # fit
    out = model.fit(input_a_np, [output_a_np])
    # evaluate
    out = model.evaluate(input_a_np, [output_a_np])
Example #59
0
def test_model_with_input_feed_tensor():
    """We test building a model with a TF variable as input.
    We should be able to call fit, evaluate, predict,
    by only passing them data for the placeholder inputs
    in the model.
    """
    import tensorflow as tf

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])
    model.summary()

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=['mean_squared_error'],
                  loss_weights=loss_weights,
                  sample_weight_mode=None)

    # test train_on_batch
    out = model.train_on_batch(input_b_np,
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.test_on_batch({'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.predict_on_batch({'input_b': input_b_np})

    # test fit
    out = model.fit({'input_b': input_b_np},
                    [output_a_np, output_b_np], epochs=1, batch_size=10)
    out = model.fit(input_b_np,
                    [output_a_np, output_b_np], epochs=1, batch_size=10)

    # test evaluate
    out = model.evaluate({'input_b': input_b_np},
                         [output_a_np, output_b_np], batch_size=10)
    out = model.evaluate(input_b_np,
                         [output_a_np, output_b_np], batch_size=10)

    # test predict
    out = model.predict({'input_b': input_b_np}, batch_size=10)
    out = model.predict(input_b_np, batch_size=10)
    assert len(out) == 2

    # Now test a model whose single input is a fed tensor,
    # i.e. we don't pass any data to fit the model.
    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    a_2 = Dense(4, name='dense_1')(a)
    a_2 = Dropout(0.5, name='dropout')(a_2)
    model = Model(a, a_2)
    model.summary()

    optimizer = 'rmsprop'
    loss = 'mse'
    model.compile(optimizer, loss, metrics=['mean_squared_error'])

    # test train_on_batch
    out = model.train_on_batch(None,
                               output_a_np)
    out = model.train_on_batch(None,
                               output_a_np)
    out = model.test_on_batch(None,
                              output_a_np)
    out = model.predict_on_batch(None)
    out = model.train_on_batch([],
                               output_a_np)
    out = model.train_on_batch({},
                               output_a_np)

    # test fit
    out = model.fit(None,
                    output_a_np, epochs=1, batch_size=10)
    out = model.fit(None,
                    output_a_np, epochs=1, batch_size=10)

    # test evaluate
    out = model.evaluate(None,
                         output_a_np, batch_size=10)
    out = model.evaluate(None,
                         output_a_np, batch_size=10)

    # test predict
    out = model.predict(None, steps=3)
    out = model.predict(None, steps=3)
    assert out.shape == (10 * 3, 4)

    # Same, without learning phase
    # i.e. we don't pass any data to fit the model.
    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    a_2 = Dense(4, name='dense_1')(a)
    model = Model(a, a_2)
    model.summary()

    optimizer = 'rmsprop'
    loss = 'mse'
    model.compile(optimizer, loss, metrics=['mean_squared_error'])

    # test train_on_batch
    out = model.train_on_batch(None,
                               output_a_np)
    out = model.train_on_batch(None,
                               output_a_np)
    out = model.test_on_batch(None,
                              output_a_np)
    out = model.predict_on_batch(None)
    out = model.train_on_batch([],
                               output_a_np)
    out = model.train_on_batch({},
                               output_a_np)

    # test fit
    out = model.fit(None,
                    output_a_np, epochs=1, batch_size=10)
    out = model.fit(None,
                    output_a_np, epochs=1, batch_size=10)

    # test evaluate
    out = model.evaluate(None,
                         output_a_np, batch_size=10)
    out = model.evaluate(None,
                         output_a_np, batch_size=10)

    # test predict
    out = model.predict(None, steps=3)
    out = model.predict(None, steps=3)
    assert out.shape == (10 * 3, 4)
Example #60
0
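# Imports reconstructed so the class below is self-contained (an assumption;
# Player, ReplayBuffer and Othello are project-specific helpers).
import glob
from collections import defaultdict, deque
from copy import deepcopy
from random import random

import numpy as np
from keras import backend as K
from keras import optimizers
from keras.layers import (Activation, Add, BatchNormalization, Conv2D, Dense,
                          Flatten, Input)
from keras.losses import mean_squared_error
from keras.models import Model, load_model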
class AIPlayer(Player):
    
    def __init__(self, buffer_size, sim_count, train=True, model="", tau=1, compile=False):
        self.buffer = ReplayBuffer(buffer_size)
        self.temp_state = deque()
        self.train = train
        self.loss = 0
        self.acc = 0
        self.batch_count = 0
        self.sim_count = sim_count
        if model != "":
            self.load(model, compile)
        else:
            self.create_network()
        self.tau = tau

    @staticmethod
    def create_if_nonexistant(config):
        models = glob.glob(config.data.model_location + "*.h5")
        if len(models) == 0:
            ai = AIPlayer(config.buffer_size, config.game.simulation_num_per_move)
            ai.save(config.data.model_location+"model_0.h5")
            del ai

    def set_training(self, train):
        self.train = train
    
    @staticmethod
    def clear():
        K.clear_session()
    
    def load(self, file, compile=False):
        try:
            del self.network
        except Exception:
            pass
        self.network = load_model(file, custom_objects={"objective_function_for_policy":AIPlayer.objective_function_for_policy,
                                                        "objective_function_for_value":AIPlayer.objective_function_for_value}, compile=compile)
        
    def save(self, file):
        self.network.save(file)
    
    def create_network(self):
        x_in = Input((3, 8, 8))
        x = Conv2D(filters=128, kernel_size=(3,3), padding="same", data_format="channels_first")(x_in)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        for _ in range(10):
            x = self._build_residual_block(x)

        res_out = x
        
        x = Conv2D(filters=2, kernel_size=1, data_format="channels_first")(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        policy_out = Dense(8*8+1, activation="softmax", name="policy_out")(x)

        x = Conv2D(filters=1, kernel_size=1, data_format="channels_first")(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        x = Dense(64, activation="relu")(x)
        value_out = Dense(1, activation="tanh", name="value_out")(x)
        
        self.network = Model(x_in, [policy_out, value_out], name="reversi_model")
        self.compile()
      
    def _build_residual_block(self, x):
        in_x = x
        x = Conv2D(filters=128, kernel_size=(3,3), padding="same", data_format="channels_first")(x)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Conv2D(filters=128, kernel_size=(3,3), padding="same", data_format="channels_first")(x)
        x = BatchNormalization(axis=1)(x)
        x = Add()([in_x, x])
        x = Activation("relu")(x)
        return x
        
    def compile(self):
        losses = [AIPlayer.objective_function_for_policy, AIPlayer.objective_function_for_value]
        self.network.compile(optimizer=optimizers.SGD(lr=1e-3, momentum=0.9), loss=losses)
      
    def update_lr(self, lr):
        K.set_value(self.network.optimizer.lr, lr)
        
    @staticmethod
    def objective_function_for_policy(y_true, y_pred):
        # essentially categorical_crossentropy, with an epsilon for numerical stability
        return K.sum(-y_true * K.log(y_pred + K.epsilon()), axis=-1)

    @staticmethod
    def objective_function_for_value(y_true, y_pred):
        return mean_squared_error(y_true, y_pred)
        
    def update_buffer(self, winner):
        if self.train:
            while len(self.temp_state) > 0:
                t = self.temp_state.pop()
                self.buffer.add((t[0], t[1], winner))
    
    def train_batches(self, batch_size, batches=-1, verbose=2):
        if batches == -1:
            s_buffer = np.array([_[0] for _ in self.buffer.buffer])
            p_buffer = np.array([_[1] for _ in self.buffer.buffer])
            v_buffer = np.array([_[2] for _ in self.buffer.buffer])
        else:
            sample_size = batch_size*batches
            sample = []
            while sample_size > 0:
                sample += self.buffer.sample(sample_size)
                sample_size -= self.buffer.size()
            s_buffer = np.array([_[0] for _ in sample])
            p_buffer = np.array([_[1] for _ in sample])
            v_buffer = np.array([_[2] for _ in sample])
        history = self.network.fit(s_buffer, [p_buffer, v_buffer], batch_size=batch_size, epochs=1, verbose=verbose)
        return history
    
    def preprocess_input(self, board, side):
        state = np.zeros((3, 8, 8), dtype=int)  # np.int is deprecated; the builtin int is equivalent
        for i in range(8):
            for j in range(8):
                if board[i,j] == 1:
                    state[0,i,j] = 1
                elif board[i,j] == -1:
                    state[1,i,j] = 1
                if side == 1:
                    state[2,i,j] = 1
        return state
    
    def evaluate(self, game, side):
        current_input = self.preprocess_input(game.board, side)
        pred = self.network.predict(current_input[np.newaxis,:])
        return pred[1][0]
    
    def pick_move(self, game, side):
        possible_moves = game.possible_moves(side)
        if len(possible_moves) == 0:
            possible_moves.append((-1,-1))
        monte_prob = self.monte_carlo(game, side)
        
        if self.train:
            self.temp_state.append((self.preprocess_input(game.board, side), np.divide(monte_prob, np.sum(monte_prob))))
        
        monte_prob = np.float_power(monte_prob, 1/self.tau)
        monte_prob = np.divide(monte_prob, np.sum(monte_prob))
        
        r = random()
        for i, move in enumerate(possible_moves):
            r -= monte_prob[Othello.move_id(move)]
            if r <= 0:
                return move
        return possible_moves[-1]
            
    def monte_carlo(self, game, side):
        N = defaultdict(lambda: 0)
        W = defaultdict(lambda: 0)
        Q = defaultdict(lambda: 0)
        P = defaultdict(lambda: 0)
        
        
        possible_moves = game.possible_moves(side)
        if len(possible_moves) == 0:
            policy = np.zeros((65))
            policy[64] = 1
            return policy
        elif len(possible_moves) == 1:
            policy = np.zeros((65))
            policy[Othello.move_id(possible_moves[0])] = 1
            return policy
        
        current_input = self.preprocess_input(game.board, side)
        sid = Othello.state_id(game.board)
        pred = self.network.predict(current_input[np.newaxis,:])
        policy = pred[0][0]
        
        total = 1e-10
        for i, move in enumerate(possible_moves):
            total += policy[Othello.move_id(move)]
          
        for move in possible_moves:
            P[(sid, Othello.move_id(move))] = policy[Othello.move_id(move)]/total
        
        for i in range(self.sim_count):
            #print("Sim #%d"% i)
            clone = deepcopy(game)
            current_side = side
            visited = deque()
            while True:
                possible_moves = clone.possible_moves(current_side)
                if len(possible_moves) == 0:
                    possible_moves.append((-1,-1))
                best_move = None
                best_move_value = -2
                sid = Othello.state_id(clone.board)
                for move in possible_moves:
                    mid = Othello.move_id(move)
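                    # simplified PUCT-style score: value estimate Q plus prior P
                    # discounted by visit count (exploration term)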
                    qu_val = Q[(sid, mid)] + P[(sid, mid)]/(N[(sid, mid)]+1)
                    if qu_val > best_move_value:
                        best_move_value = qu_val
                        best_move = move
                
                #print(best_move)
                
                if N[(sid, Othello.move_id(best_move))] == 0:
                    visited.append((sid, Othello.move_id(best_move)))
                    clone.play_move(best_move[0], best_move[1], current_side)
                    current_side *= -1
                    if clone.game_over():
                        for node in visited:
                            N[node] += 1
                            W[node] += clone.get_winner()*side
                            Q[node] = W[node]/N[node]
                        break
                    
                    current_input = self.preprocess_input(clone.board, current_side)
                    sid = Othello.state_id(clone.board)
                    pred = self.network.predict(current_input[np.newaxis,:])
                    policy = pred[0][0]
                    value = pred[1][0]
                    
                    possible_moves = clone.possible_moves(current_side)
                    if len(possible_moves) == 0:
                        possible_moves.append((-1,-1))
                    total = 1e-10
                    for i, move in enumerate(possible_moves):
                        total += policy[Othello.move_id(move)]
                      
                    for move in possible_moves:
                        P[(sid, Othello.move_id(move))] = policy[Othello.move_id(move)]/total
                    
                    for node in visited:
                        N[node] += 1
                        W[node] += value*side
                        Q[node] = W[node]/N[node]
                    #print()
                    break
                else:
                    visited.append((sid, Othello.move_id(best_move)))
                    clone.play_move(best_move[0], best_move[1], current_side)
                    current_side *= -1
                    if clone.game_over():
                        for node in visited:
                            N[node] += 1
                            W[node] += clone.get_winner()*side
                            Q[node] = W[node]/N[node]
                        break
                             
        policy = np.zeros((65))
        possible_moves = game.possible_moves(side)
        sid = Othello.state_id(game.board)
        for move in possible_moves:
            mid = Othello.move_id(move)
            policy[mid] = N[(sid,mid)]
        
        return policy