Example #1
def createDNNModel(input_dim, end_act, loss, outnodes=1):
    config = DNNConfig()
    neurons = config.getNeurons()
    init_wt = config.getWeightInit()
    dropouts = config.getDropoutRate()
    regs = config.getRegularizer()
    act = config.getActivation()
    optm_algo = config.getAlgorithm()
    lr = config.getLearnRate()
    mom = config.getMomentum()
    
    model = Sequential()

    # two frozen identity layers reserved for input standardization (offset, then scale)
    model.add(Dense(input_dim, input_dim=input_dim, name='offset',
                    trainable=False,
                    kernel_initializer=initializers.Identity(gain=1.)))
    model.add(Dense(input_dim, input_dim=input_dim, name='scaler',
                    trainable=False,
                    kernel_initializer=initializers.Identity(gain=1.)))

    for n in range(len(neurons)):
        reg, rv = regs[n].split(":")  # e.g. "l2:0.01" -> (name, value)
        regler = Regularizer_switcher[reg]
        if n == 0:
            model.add(Dense(neurons[n], input_dim=input_dim,
                            kernel_initializer=init_wt,
                            kernel_regularizer=regler(float(rv))))
        else:
            model.add(Dense(neurons[n], kernel_initializer=init_wt,
                            kernel_regularizer=regler(float(rv))))
        
        model.add(BatchNormalization(axis=1))
        model.add(Activation(act[n]))
        if dropouts[n] > 0.0:
            model.add(Dropout(dropouts[n]))
    if lr < 0:  # a negative learning rate means "use the optimizer's defaults"
        optm = Optimizer_switcher[optm_algo]()
    else:
        if optm_algo == "SGD":
            optm = Optimizer_switcher[optm_algo](lr=lr,momentum=mom)
        else:
            optm = Optimizer_switcher[optm_algo](lr=lr)
            
    model.add(Dense(outnodes, kernel_initializer=init_wt, activation=end_act, name='output'))
    model.compile(loss=loss, optimizer=optm, metrics=['accuracy'])

    model.summary()
    return model
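
createDNNModel relies on module-level names the snippet does not show: a DNNConfig class plus two lookup tables mapping strings to Keras classes. A minimal sketch of what those tables and the imports might look like, assuming standalone Keras 2.x; the exact contents are assumptions, not the project's definitions.

# Hypothetical support definitions for createDNNModel (standalone Keras 2.x assumed).
from keras import initializers, optimizers, regularizers
from keras.models import Sequential
from keras.layers import Activation, BatchNormalization, Dense, Dropout

Regularizer_switcher = {"l1": regularizers.l1, "l2": regularizers.l2}
Optimizer_switcher = {"SGD": optimizers.SGD,
                      "Adam": optimizers.Adam,
                      "RMSprop": optimizers.RMSprop}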
Example #2
 def add_rnn(device):
     with tf.device(device):
         input = layers.Input(batch_shape=(args.batch_size, args.length),
                              dtype="uint8")
         log.info("Added %s", input)
         embedding = layers.Embedding(
             256,
             256,
             embeddings_initializer=initializers.Identity(),
             trainable=False)(input)
         log.info("Added %s", embedding)
     layer = embedding
     layer_sizes = [int(n) for n in args.layers.split(",")]
     for i, nn in enumerate(layer_sizes):
         with tf.device(device):
             layer_type = getattr(layers, args.type)
             ret_seqs = (i < len(layer_sizes) - 1)
             try:
                 layer = layer_type(nn,
                                    return_sequences=ret_seqs,
                                    implementation=2)(layer)
             except TypeError:
                 # implementation kwarg is not present in CuDNN layers
                 layer = layer_type(nn, return_sequences=ret_seqs)(layer)
             log.info("Added %s", layer)
         if args.dropout > 0:
             layer = layers.Dropout(args.dropout)(layer)
             log.info("Added %s", layer)
     return input, layer
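
add_rnn reads its hyperparameters from a module-level args namespace and logs through a module logger; layers and initializers come from Keras. A hypothetical context, with every name and value below chosen for illustration:

# Hypothetical context for add_rnn; all values are illustrative.
import argparse
import logging
import tensorflow as tf
from keras import initializers, layers

log = logging.getLogger("rnn")
args = argparse.Namespace(batch_size=32, length=128,
                          layers="128,128", type="LSTM", dropout=0.2)
inputs, outputs = add_rnn("/cpu:0")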
Example #3
def get_tower_model(img_shape):
  model = Sequential()
  model.add(SimpleRNN(hidden_units,
                      kernel_initializer=initializers.RandomUniform(minval=-.1,maxval=.1),
                      recurrent_initializer=initializers.Identity(gain=1.0),
                      activation='relu',
                      batch_input_shape=(None, img_shape[0], img_shape[1])))
  model.add(Dense(num_classes))
  return model
Example #4
def create_sequential_model(size):
    model = Sequential()
    model.add(
        SimpleRNN(100,
                  kernel_initializer=initializers.RandomNormal(stddev=0.01),
                  recurrent_initializer=initializers.Identity(gain=0.5),
                  activation='relu',
                  input_shape=size))
    model.add(Dense(classes))
    model.add(Activation('softmax'))
    return model
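
create_sequential_model takes size as an input_shape tuple of (timesteps, features) and reads classes from module scope. A hypothetical call, assuming MNIST-style data:

classes = 10                               # assumed module-level constant
model = create_sequential_model((28, 28))  # 28 timesteps of 28 features, e.g. MNIST rows
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])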
Example #5
def getInitializer(init_name, learning_rate, opt, functions):

    if init_name == "rnormal":
        init = initializers.RandomNormal()
    elif init_name == "runiform":
        init = initializers.RandomUniform()
    elif init_name == "varscaling":
        init = initializers.VarianceScaling()
    elif init_name == "orth":
        init = initializers.Orthogonal()
    elif init_name == "id":
        init = initializers.Identity()
    elif init_name == "lecun_uniform":
        init = initializers.lecun_uniform()
    elif init_name == "glorot_normal":
        init = initializers.glorot_normal()
    elif init_name == "glorot_uniform":
        init = initializers.glorot_uniform()
    elif init_name == "he_normal":
        init = initializers.he_normal()
    elif init_name == "he_uniform":
        init = initializers.he_uniform()

    if opt == "Adam":
        optimizer = optimizers.Adam(lr=learning_rate)
    elif opt == "Adagrad":
        optimizer = optimizers.Adagrad(lr=learning_rate)
    elif opt == "Adadelta":
        optimizer = optimizers.Adadelta(lr=learning_rate)
    elif opt == "Adamax":
        optimizer = optimizers.Adamax(lr=learning_rate)
    elif opt == "Nadam":
        optimizer = optimizers.Nadam(lr=learning_rate)
    elif opt == "sgd":
        optimizer = optimizers.SGD(lr=learning_rate)
    elif opt == "RMSprop":
        optimizer = optimizers.RMSprop(lr=learning_rate)

    if functions.startswith("maxout"):
        functions, maxout_k = functions.split("-")
        maxout_k = int(maxout_k)
    else:
        maxout_k = 3
    if functions.startswith("leakyrelu"):
        if "-" in functions:
            functions, maxout_k = functions.split("-")
            maxout_k = float(maxout_k)
        else:
            maxout_k = 0.01

    return init, optimizer, functions, maxout_k
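
Despite its name, getInitializer also resolves the optimizer and the activation spec, and its fourth return value doubles as the maxout pool size or the LeakyReLU slope. A hypothetical call:

init, optimizer, act, k = getInitializer("he_normal", 0.001, "Adam", "leakyrelu-0.2")
# init -> initializers.he_normal(), optimizer -> optimizers.Adam(lr=0.001),
# act -> "leakyrelu", k -> 0.2 (consumed as the LeakyReLU alpha)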
Example #6
 def getSimpleRNNModel(self):
     model = Sequential()
     model.add(SimpleRNN(11,
                 kernel_initializer=initializers.RandomNormal(stddev=0.001),
                 recurrent_initializer=initializers.Identity(gain=1.0),
                 activation=LeakyReLU(),
                 return_sequences = False,
                 input_shape=(11,1)))
     model.add(Dense(10, kernel_initializer="uniform", activation=LeakyReLU()))
     model.add(Dense(1, kernel_initializer="uniform", activation='linear'))
     #rmsprop = RMSprop(lr=0.001)
     model.compile(loss='mse', optimizer='adam')
     model.summary()
     return model
Example #7
def init_model(hidden_units, inputs, lr):
    model = Sequential()
    model.add(
        SimpleRNN(hidden_units,
                  kernel_initializer=initializers.RandomNormal(stddev=0.001),
                  recurrent_initializer=initializers.Identity(gain=1.0),
                  activation='relu',
                  input_shape=inputs))
    model.add(Dense(NUM_CLASSES))
    model.add(Activation('softmax'))
    rmsprop = RMSprop(lr=lr)
    model.compile(loss='categorical_crossentropy',
                  optimizer=rmsprop,
                  metrics=['accuracy'])
    return model
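
init_model follows the IRNN recipe (ReLU units with an identity recurrent matrix). A hypothetical pixel-by-pixel MNIST call, with NUM_CLASSES assumed at module level:

NUM_CLASSES = 10  # assumed module-level constant
model = init_model(hidden_units=100, inputs=(784, 1), lr=1e-6)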
Example #8
def rnn010(num_in,num_out):
    model = Sequential()
    #
    model.add(SimpleRNN(num_in*4,
                    kernel_initializer=initializers.RandomNormal(stddev=0.001),
                    recurrent_initializer=initializers.Identity(gain=1.0),
                    activation='relu',
                    input_shape=(num_in,1)
                    ))
                    
    #
    model.add(Dense(num_out,activation='softmax'))
    #
    rmsprop = RMSprop(lr=1e-6)
    model.compile(loss='categorical_crossentropy',optimizer=rmsprop,metrics=['accuracy'])
    #
    return model
Example #9
def get_model(x_train, y_train, x_test, y_test):
    x_train = x_train.reshape(x_train.shape[0], -1, 1)
    x_test = x_test.reshape(x_test.shape[0], -1, 1)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    print('Evaluate IRNN...')
    model = Sequential()
    model.add(SimpleRNN(hidden_units,
                        kernel_initializer=initializers.RandomNormal(stddev=0.001),
                        recurrent_initializer=initializers.Identity(gain=1.0),
                        activation='relu',
                        input_shape=x_train.shape[1:]))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    rmsprop = RMSprop(lr=learning_rate)
    model.compile(loss='categorical_crossentropy',
                  optimizer=rmsprop,
                  metrics=['accuracy'])

    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))

    scores = model.evaluate(x_test, y_test, verbose=0)
    print('IRNN test score:', scores[0])
    print('IRNN test accuracy:', scores[1])
    
    return model
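
get_model leans on several globals; a minimal driver, assuming the stock Keras mnist_irnn values and that the snippet's own imports (keras, Sequential, SimpleRNN, initializers, RMSprop) are in scope:

from keras.datasets import mnist

hidden_units, num_classes = 100, 10  # assumed values
batch_size, epochs = 32, 200
learning_rate = 1e-6
(x_train, y_train), (x_test, y_test) = mnist.load_data()
model = get_model(x_train, y_train, x_test, y_test)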
Example #10
 def __create_model_rnn(self, learning_rate=0.001, epochs=1, batch_size=32):
     self.batch_size = batch_size  # Size of each batch
     # self.n_epochs = 200
     self.n_epochs = epochs
     hidden_units = 100
     # you should set learning rate to 1e-6
     # clip_norm = 1.0
     self.model = Sequential()
     self.model.add(
         SimpleRNN(
             hidden_units,
             kernel_initializer=initializers.RandomNormal(stddev=0.001),
             recurrent_initializer=initializers.Identity(gain=1.0),
             activation='relu',
             input_shape=self.x_train.shape[1:]))
     self.model.add(Dense(self.num_classes))
     self.model.add(Activation('softmax'))
     rmsprop = RMSprop(lr=learning_rate)
     self.model.compile(loss='categorical_crossentropy',
                        optimizer=rmsprop,
                        metrics=['accuracy'])
     print(self.model.summary())
Example #11
def create_base_network(input_dim):
    '''Base network to be shared (eq. to feature extraction).
    '''
    seq = Sequential()
    """
    seq.add(Dense(128, input_shape=(input_dim,), activation='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(128, activation='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(128, activation='relu'))
    """
    hidden_units = 100

    seq.add(
        SimpleRNN(hidden_units,
                  kernel_initializer=initializers.RandomNormal(stddev=0.001),
                  recurrent_initializer=initializers.Identity(gain=1.0),
                  activation='relu',
                  input_shape=(15, 12)))  # (timesteps, features); a leading None here would make the input 4-D
    seq.add(Dense(128, activation='relu'))
    #"""

    return seq
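
create_base_network is the shared tower of a siamese setup (input_dim is ignored by the active RNN branch). A hypothetical pairing with the functional API, so both branches reuse one set of weights:

from keras.layers import Input
from keras.models import Model

base = create_base_network(input_dim=12)         # hypothetical call
left, right = Input(shape=(15, 12)), Input(shape=(15, 12))
feat_left, feat_right = base(left), base(right)  # the same weights process both inputs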
Example #12
for i in range(x_test.shape[1]):  # one scaler per timestep; the index must match x_test[:, i, :]
    x_test[:, i, :] = scalers[i].transform(x_test[:, i, :])

print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
#y_train = keras.utils.to_categorical(y_train, num_classes)
#y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(
    SimpleRNN(hidden_units,
              kernel_initializer=initializers.RandomNormal(stddev=0.001),
              recurrent_initializer=initializers.Identity(gain=1.0),
              activation='relu',
              input_shape=x_train.shape[1:]))
model.add(Dense(num_classes))  # linear hidden layer; the softmax Dense below yields probabilities
model.add(Dense(num_classes, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=.00001),
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs)
Example #13
 pytest.param(
     initializers.truncated_normal(mean=0.2, stddev=0.003, seed=42),
     dict(class_name="truncated_normal", mean=0.2, stddev=0.003, seed=42),
     id="tn_1",
 ),
 pytest.param(
     initializers.Orthogonal(1.1),
     dict(class_name="orthogonal", gain=1.1, seed=None),
     id="o_0",
 ),
 pytest.param(
     initializers.orthogonal(gain=1.2, seed=42),
     dict(class_name="orthogonal", gain=1.2, seed=42),
     id="o_1",
 ),
 pytest.param(initializers.Identity(1.1), dict(class_name="identity", gain=1.1), id="i_0"),
 pytest.param(initializers.identity(), dict(class_name="identity", gain=1.0), id="i_1"),
 #################### VarianceScaling ####################
 pytest.param(
     initializers.glorot_normal(), dict(class_name="glorot_normal", seed=None), id="gn_0"
 ),
 pytest.param(
     initializers.glorot_uniform(42), dict(class_name="glorot_uniform", seed=42), id="gu_0"
 ),
 pytest.param(initializers.he_normal(), dict(class_name="he_normal", seed=None), id="hn_0"),
 pytest.param(
     initializers.he_uniform(42), dict(class_name="he_uniform", seed=42), id="hu_0"
 ),
 pytest.param(
     initializers.lecun_normal(), dict(class_name="lecun_normal", seed=None), id="ln_0"
 ),
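
Each pytest.param above pairs an initializer instance with the config dict the surrounding test expects after serialization; the lowercase class_name values are the test suite's own normalization. A minimal sketch of the underlying round trip, assuming standalone Keras 2.x:

from keras import initializers

init = initializers.Orthogonal(1.1)
print(initializers.serialize(init))
# e.g. {'class_name': 'Orthogonal', 'config': {'gain': 1.1, 'seed': None}}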
Example #14
File: dafm.py Project: rvoak/dAFM
    def build(self,
              dafm_type="dafm-afm",
              optimizer="rmsprop",
              learning_rate=0.01,
              activation="linear",
              Q_jk_initialize=0,
              section="",
              section_count=0,
              model1="",
              stateful=False,
              theta_student="False",
              student_count=0,
              binary="False"):

        skills = np.shape(Q_jk_initialize)[1]
        steps = np.shape(Q_jk_initialize)[0]
        self.activation = activation
        if '-' in self.activation:
            activation = self.custom_activation

        if dafm_type.split("_")[-1] == "different":
            skills = int(float(dafm_type.split("_")[-2]) * skills)
            dafm_type = dafm_type.split('_')[0]

        if dafm_type.split("_")[0] == "round-fine-tuned":
            try:
                self.round_threshold = float(dafm_type.split("_")[-1])
                dafm_type = dafm_type.split("_")[0]
            except ValueError:  # no numeric threshold suffix on the type string
                pass

        q_jk_size = skills
        if '^' in dafm_type:
            skills = int(float(dafm_type.split('^')[-1]) * skills)
            dafm_type = dafm_type.split('^')[0]

        self.dafm_type = dafm_type
        if dafm_type == "random-uniform" or dafm_type == "random-normal":
            qtrainable, finetuning, randomize = True, False, True
            self.random_init = dafm_type.split('-')[-1]
        elif dafm_type == "dafm-afm":
            qtrainable, finetuning, randomize = False, False, False
        elif dafm_type == "fine-tuned":
            qtrainable, finetuning, randomize = True, True, False
        elif dafm_type == "kcinitialize":
            qtrainable, finetuning, randomize = True, False, False
        elif dafm_type == "round-fine-tuned":
            # if not self.round_threshold == -1:
            # rounded_Qjk = np.abs(Q_jk1 - Q_jk_initialize)
            # Q_jk1[rounded_Qjk <= self.round_threshold] = Q_jk_initialize[rounded_Qjk <= self.round_threshold]
            # Q_jk1[rounded_Qjk > self.round_threshold] = np.ones(np.shape(Q_jk_initialize[rounded_Qjk > self.round_threshold])) - Q_jk_initialize[rounded_Qjk > self.round_threshold]
            # else:
            Q_jk1 = model1.get_layer("Q_jk").get_weights()[0]
            Q_jk1 = np.minimum(
                np.ones(np.shape(Q_jk1)),
                np.maximum(np.round(Q_jk1), np.zeros(np.shape(Q_jk1))))
            model1.get_layer("Q_jk").set_weights([Q_jk1])
            return model1
        elif dafm_type == "qjk-dense":
            qtrainable, finetuning, randomize = False, False, False
            activation_dense = activation
        elif dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
            qtrainable, finetuning, randomize = False, False, True
            self.random_init = dafm_type.split('-')[-1]
            activation_dense = activation
        else:
            print("No Valid Model Found")
            sys.exit()

        if section == "onehot":
            section_input = Input(batch_shape=(None, None, section_count),
                                  name='section_input')
        if not theta_student == "False":
            student_input = Input(batch_shape=(None, None, student_count),
                                  name='student_input')

        virtual_input1 = Input(batch_shape=(None, None, 1),
                               name='virtual_input1')
        if finetuning:
            B_k = TimeDistributed(Dense(
                skills,
                activation='linear',
                kernel_initializer=self.f(
                    model1.get_layer("B_k").get_weights()[0]),
                use_bias=False),
                                  name="B_k")(virtual_input1)
            T_k = TimeDistributed(Dense(
                skills,
                activation='linear',
                kernel_initializer=self.f(
                    model1.get_layer("T_k").get_weights()[0]),
                use_bias=False),
                                  name="T_k")(virtual_input1)
            bias_layer = TimeDistributed(Dense(
                1,
                activation='linear',
                use_bias=False,
                kernel_initializer=self.f(
                    model1.get_layer("bias").get_weights()[0]),
                trainable=True),
                                         name="bias")(virtual_input1)
        else:
            B_k = TimeDistributed(Dense(skills,
                                        activation='linear',
                                        use_bias=False,
                                        trainable=True),
                                  name="B_k")(virtual_input1)
            T_k = TimeDistributed(Dense(skills,
                                        activation='linear',
                                        use_bias=False,
                                        trainable=True),
                                  name="T_k")(virtual_input1)
            bias_layer = TimeDistributed(Dense(
                1,
                activation='linear',
                use_bias=False,
                kernel_initializer=initializers.Zeros(),
                trainable=True),
                                         name="bias")(virtual_input1)

        step_input = Input(batch_shape=(None, None, steps), name='step_input')
        if randomize:
            if binary == "False":
                Q_jk = TimeDistributed(Dense(
                    q_jk_size,
                    use_bias=False,
                    activation=activation,
                    kernel_initializer=self.custom_random),
                                       trainable=qtrainable,
                                       name="Q_jk")(step_input)
            else:
                Q_jk = TimeDistributed(BinaryDense(
                    q_jk_size,
                    use_bias=False,
                    activation=activation,
                    kernel_initializer=self.custom_random),
                                       trainable=qtrainable,
                                       name="Q_jk")(step_input)
        else:
            if binary == "False":
                Q_jk = TimeDistributed(Dense(
                    skills,
                    activation=activation,
                    kernel_initializer=self.f(Q_jk_initialize),
                    use_bias=False,
                    trainable=qtrainable),
                                       trainable=qtrainable,
                                       name="Q_jk")(step_input)
            else:
                Q_jk = TimeDistributed(BinaryDense(
                    skills,
                    activation=activation,
                    kernel_initializer=self.f(Q_jk_initialize),
                    trainable=qtrainable,
                    use_bias=False),
                                       name="Q_jk",
                                       trainable=qtrainable)(step_input)

        if dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
            if binary == "False":
                Q_jk = TimeDistributed(Dense(
                    skills,
                    activation=activation_dense,
                    use_bias=False,
                    kernel_initializer=self.custom_random,
                    trainable=True),
                                       name="Q_jk_dense")(Q_jk)
            else:
                Q_jk = TimeDistributed(BinaryDense(
                    skills,
                    activation=activation_dense,
                    use_bias=False,
                    kernel_initializer=self.custom_random,
                    trainable=True),
                                       name="Q_jk_dense")(Q_jk)

        elif dafm_type == "qjk-dense":
            if binary == 'False':
                Q_jk = TimeDistributed(Dense(
                    skills,
                    activation=activation_dense,
                    use_bias=False,
                    kernel_initializer=initializers.Identity(),
                    trainable=True),
                                       name="Q_jk_dense")(Q_jk)
            else:
                Q_jk = TimeDistributed(BinaryDense(
                    skills,
                    activation=activation_dense,
                    use_bias=False,
                    kernel_initializer=initializers.Identity(),
                    trainable=True),
                                       name="Q_jk_dense")(Q_jk)
        else:
            pass

        Qjk_mul_Bk = multiply([Q_jk, B_k])
        sum_Qjk_Bk = TimeDistributed(Dense(
            1,
            activation='linear',
            trainable=False,
            kernel_initializer=initializers.Ones(),
            use_bias=False),
                                     trainable=False,
                                     name="sum_Qjk_Bk")(Qjk_mul_Bk)

        P_k = SimpleRNN(skills,
                        kernel_initializer=initializers.Identity(),
                        recurrent_initializer=initializers.Identity(),
                        use_bias=False,
                        trainable=False,
                        activation='linear',
                        return_sequences=True,
                        name="P_k")(Q_jk)

        Qjk_mul_Pk_mul_Tk = multiply([Q_jk, P_k, T_k])
        sum_Qjk_Pk_Tk = TimeDistributed(
            Dense(1,
                  activation='linear',
                  trainable=False,
                  kernel_initializer=initializers.Ones(),
                  use_bias=False),
            trainable=False,
            name="sum_Qjk_Pk_Tk")(Qjk_mul_Pk_mul_Tk)
        Concatenate = concatenate([bias_layer, sum_Qjk_Bk, sum_Qjk_Pk_Tk])

        if not (theta_student == "False"):
            if finetuning:
                theta = TimeDistributed(Dense(
                    1,
                    activation="linear",
                    use_bias=False,
                    kernel_initializer=self.f(
                        model1.get_layer("theta").get_weights()[0])),
                                        name='theta')(student_input)
            else:
                theta = TimeDistributed(Dense(1,
                                              activation="linear",
                                              use_bias=False),
                                        name='theta')(student_input)
            Concatenate = concatenate([Concatenate, theta])

        if section == "onehot":
            if finetuning:
                S_k = TimeDistributed(Dense(
                    1,
                    activation="linear",
                    use_bias=False,
                    kernel_initializer=self.f(
                        model1.get_layer("S_k").get_weights()[0])),
                                      name='S_k')(section_input)
            else:
                S_k = TimeDistributed(Dense(1,
                                            activation="linear",
                                            use_bias=False),
                                      name='S_k')(section_input)
            Concatenate = concatenate([Concatenate, S_k])

        output = TimeDistributed(Dense(1,
                                       activation="sigmoid",
                                       trainable=False,
                                       kernel_initializer=initializers.Ones(),
                                       use_bias=False),
                                 trainable=False,
                                 name="output")(Concatenate)
        if section == "onehot" and not (theta_student == "False"):
            model = Model(inputs=[
                virtual_input1, step_input, section_input, student_input
            ],
                          outputs=output)
        elif section == "onehot" and theta_student == "False":
            model = Model(inputs=[virtual_input1, step_input, section_input],
                          outputs=output)
        elif not (section == "onehot") and not (theta_student == "False"):
            model = Model(inputs=[virtual_input1, step_input, student_input],
                          outputs=output)
        else:
            model = Model(inputs=[virtual_input1, step_input], outputs=output)

        d_optimizer = {
            "rmsprop": optimizers.RMSprop(lr=learning_rate),
            "adam": optimizers.Adam(lr=learning_rate),
            "adagrad": optimizers.Adagrad(lr=learning_rate)
        }
        model.compile(optimizer=d_optimizer[optimizer], loss=self.custom_bce)
        return model
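
A notable trick in this build is the frozen P_k layer: a linear SimpleRNN with identity kernel and recurrent matrices and no bias computes h_t = x_t + h_(t-1), i.e. a running sum over timesteps (here, of practice opportunities). A minimal standalone check, assuming Keras 2.x:

import numpy as np
from keras import initializers
from keras.layers import Input, SimpleRNN
from keras.models import Model

inp = Input(shape=(4, 3))
cumsum = SimpleRNN(3,
                   kernel_initializer=initializers.Identity(),
                   recurrent_initializer=initializers.Identity(),
                   use_bias=False, trainable=False,
                   activation='linear', return_sequences=True)(inp)
m = Model(inp, cumsum)
print(m.predict(np.ones((1, 4, 3))))  # rows read 1, 2, 3, 4: a cumulative sum over time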
Example #15
def mnist_irnn(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']

    x_train = x_train.reshape(x_train.shape[0], -1, 1)
    x_test = x_test.reshape(x_test.shape[0], -1, 1)
    x_val = x_test  # reuse the reshaped test set for validation
    x_train_shape = x_train.shape
    input_shape = x_train_shape[1:]
    num_classes = y_train.shape[1]
    hidden_units = 100
    learning_rate = 1e-6

    with graph.as_default():
        model = Sequential()
        model.add(
            SimpleRNN(
                hidden_units,
                kernel_initializer=initializers.RandomNormal(stddev=0.001),
                recurrent_initializer=initializers.Identity(gain=1.0),
                activation='relu',
                input_shape=input_shape))
        model.add(Dense(num_classes))
        model.add(Activation('softmax'))
        rmsprop = RMSprop(lr=learning_rate)
        model.compile(loss='categorical_crossentropy',
                      optimizer=rmsprop,
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[
                                batch_print_callback, best_checkpoint,
                                general_checkpoint
                            ],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}
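
mnist_irnn pulls its training and evaluation keyword arguments from a conf dict and its arrays from an input dict. A hypothetical shape for both, matching the keys the function reads; the snippet's other module-level dependencies (graph, logger_service, MongoModelCheckpoint, LambdaCallback) are assumed to be in scope:

import numpy as np

x = np.random.rand(64, 784).astype('float32')
y = np.eye(10)[np.random.randint(0, 10, 64)]  # one-hot labels
conf = {'fit': {'args': {'batch_size': 32, 'epochs': 1}},
        'evaluate': {'args': {'batch_size': 32}}}
data = {'x_tr': x, 'y_tr': y, 'x_te': x, 'y_te': y}
result = mnist_irnn(conf, data, result_sds=None, project_id=None)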