Example #1
 def build(self, input_shape):
     self.kernel = self.add_weight(shape=(input_shape[1], 1),
                                   initializer='uniform',
                                   trainable=True,
                                   regularizer=l1(0.1),
                                   constraint=nonneg())
     super(WeightLayer, self).build(input_shape)
Example #2
    def test_nonneg(self):
        from keras.constraints import nonneg

        nonneg_instance = nonneg()

        normed = nonneg_instance(K.variable(self.example_array))
        assert (np.all(np.min(K.eval(normed), axis=1) == 0.))
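
This test (repeated verbatim as Example #5) asserts that applying nonneg zeroes out every negative entry, so each row's minimum becomes exactly 0. A minimal standalone sketch of the same check, assuming TensorFlow 2.x, where the constraint is exposed as tf.keras.constraints.NonNeg:

import numpy as np
import tensorflow as tf

# Small fixed array with at least one negative value per row.
example_array = np.array([[-1.0, 2.0, 3.0],
                          [0.5, -4.0, 1.5]])

non_neg = tf.keras.constraints.NonNeg()
projected = non_neg(tf.constant(example_array, dtype=tf.float32)).numpy()

assert np.all(projected >= 0.0)                  # no negative weights survive
assert np.all(np.min(projected, axis=1) == 0.0)  # negatives were clipped to 0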
Example #3
File: dqn.py  Project: zsimone10/FIFABets
    def _build_model(self, data_dim):
        # Neural Net for Deep-Q learning Model
        minputs = Input(shape=(data_dim, ))
        # $ Arm
        a = Dense(64, activation='sigmoid')(minputs)
        a = Dense(64, activation='sigmoid')(a)
        a = Dense(32, activation='sigmoid')(a)
        a = Dense(32, activation='sigmoid')(a)
        a = Dense(1, activation='sigmoid', W_constraint=nonneg())(a)

        # reward compilation
        r = concatenate([a, minputs])
        r = Dense(512, activation='relu')(r)
        r = Dense(128, activation='relu')(r)
        r = Dense(128, activation='relu')(r)
        r = Dense(64, activation='relu')(r)
        output = Dense(3, activation='linear')(r)
        modelReward = Model(inputs=minputs, outputs=output)
        modelPred = Model(inputs=modelReward.input,
                          outputs=[modelReward.get_layer('dense_5').output])
        plot_model(modelReward, to_file='modelDQNfull.png')
        plot_model(modelPred, to_file='modelDQNPred.png')
        plot_model(modelReward,
                   to_file='dqnmodel.png',
                   show_shapes=True,
                   show_layer_names=False)

        print(modelReward.summary())
        adadelta = optimizers.Adadelta()
        modelReward.compile(loss='mse',
                            optimizer=adadelta,
                            metrics=['accuracy'])
        modelPred.compile(loss='mse', optimizer=adadelta, metrics=['accuracy'])
        return modelReward, modelPred
Example #4
def loadOrCreateModel(modelName, x):

    jsonName = "{}.json".format(modelName)
    h5Name = "{}.h5".format(modelName)

    if (isfile(jsonName) and isfile(h5Name)):

        loaded_model_json = None
        with open(jsonName, "r") as json_file:
            loaded_model_json = json_file.read()

        model = model_from_json(loaded_model_json)
        model.load_weights(h5Name)
        model.compile(loss="mse", optimizer="rmsprop", metrics=["mse"])
        return model
    else:
        model = Sequential()
        model.add(
            LSTM(128,
                 input_shape=(x.shape[1], x.shape[2]),
                 return_sequences=True))
        model.add(LSTM(64, activation="relu", return_sequences=True))
        model.add(LSTM(32, activation="relu", return_sequences=False))

        # model.add(Dense(128, activation="relu"))
        model.add(Dense(1, activation='linear', kernel_constraint=nonneg()))
        # model.add(Dense(1, activation="relu", kernel_constraint=nonneg()))
        model.compile(loss="mse", optimizer="rmsprop", metrics=["mse"])
        model.summary()
        return model
Example #5
    def test_nonneg(self):
        from keras.constraints import nonneg

        nonneg_instance = nonneg()

        normed = nonneg_instance(K.variable(self.example_array))
        assert (np.all(np.min(K.eval(normed), axis=1) == 0.))
Example #6
 def build(self, input_shape):
     if self.noneg == 0:
         self.alpha = self.add_weight(
             name='kernel',
             shape=(1, ),
             initializer=keras.initializers.Constant(value=self.ival),
             constraint=nonneg(),
             trainable=True)
     else:
         self.alpha = self.add_weight(
             name='kernel',
             shape=(1, ),
             initializer=keras.initializers.Constant(value=self.ival),
             constraint=nonneg(),
             trainable=False)
     super(Scale, self).build(input_shape)
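
For reference, a self-contained sketch of the same idea under TensorFlow 2.x: a layer holding a single trainable scalar constrained to stay non-negative. The class name ScaleTF2 and the call() body (multiplying the input by alpha) are assumptions here, since the original snippet only shows build():

import tensorflow as tf

class ScaleTF2(tf.keras.layers.Layer):
    # Multiplies its input by one trainable, non-negative scalar.
    def __init__(self, ival=1.0, trainable_alpha=True, **kwargs):
        super().__init__(**kwargs)
        self.ival = ival
        self.trainable_alpha = trainable_alpha

    def build(self, input_shape):
        self.alpha = self.add_weight(
            name='alpha',
            shape=(1,),
            initializer=tf.keras.initializers.Constant(self.ival),
            constraint=tf.keras.constraints.NonNeg(),
            trainable=self.trainable_alpha)
        super().build(input_shape)

    def call(self, inputs):
        return self.alpha * inputs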
Example #7
def baseline_Model_2():
    model = Sequential()
    #  ~model.add(Dense(24, input_dim=24, kernel_initializer='normal', activation='relu'))
    #  ~model.add(Dense(32, input_dim=32, kernel_initializer='normal', activation='relu'))
    model.add(
        Dense(34, input_dim=34, kernel_initializer='normal',
              activation='relu'))
    model.add(Dense(34, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal', W_constraint=nonneg()))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
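
Hedged note: W_constraint is the Keras 1 keyword; Keras 2 renamed it kernel_constraint and exposes the constraint class as NonNeg. A sketch of an equivalent model under tf.keras (the function name baseline_model_2_tf2 is made up here; 'random_normal' stands in for Keras 1's 'normal' initializer):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.constraints import NonNeg

def baseline_model_2_tf2():
    model = Sequential()
    model.add(Dense(34, input_dim=34, kernel_initializer='random_normal',
                    activation='relu'))
    model.add(Dense(34, kernel_initializer='random_normal', activation='relu'))
    # Non-negative weights on the output layer, as in the original.
    model.add(Dense(1, kernel_initializer='random_normal',
                    kernel_constraint=NonNeg()))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model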
Example #8
def build_model(input_size):
    regulizers = [L1L2(l1=0.0, l2=0.0)]
    model = Sequential()
    model.add(LSTM(32, input_shape=(look_back, input_size)))
    #model.add(Dropout(0.2))
    #model.add(LSTM(10, input_shape=(8, look_back, input_size)))
    model.add(Dense(64, activation='relu'))
    #model.add(Dropout(0.2))

    #model.add(Dense(n_seq))
    model.add(Dense(n_seq, W_constraint=nonneg()))
    model.compile(loss='mean_squared_error',
                  optimizer='adam')  #, metrics=['mse', 'mae', rmse])
    return model
Example #9
 def build(self, input_shape):
     self.alpha = self.add_weight(
         name='kernel',
         shape=(1, ),
         initializer=keras.initializers.Constant(
             self.ialpha),  #constraint=WeightClip(0, 1.0),  # 0<alpha<1
         trainable=True)
     self.tau = self.add_weight(
         name='kernel',
         shape=(1, ),
         initializer=keras.initializers.Constant(self.itau),
         constraint=nonneg(),  # Positive threshold
         trainable=True)
     super(Prox, self).build(input_shape)
Example #10
File: socnn.py  Project: tkmrhcp/socnn
def build_socnn(input_shape_sig=(128, 1), input_shape_off=(128, 1), dim=1):
    #significant_network
    Input_sig = Input(shape=input_shape_sig, dtype='float32', name='input_sig')
    name = "Significance_Conv_0"
    x = Conv1D(filters=8,kernel_size=ks, padding='same',
           activation='linear', name=name,
           kernel_constraint=maxnorm(norm))(Input_sig)

    for i in range(num_layer_sig-1):
        name = "Significance_Conv_" + str(i+1)
        if i == (num_layer_sig-2):
            fn = dim-1
        else:
            fn = 8
        x = Conv1D(filters=fn,
                kernel_size=ks, padding='same',
                activation='linear', name=name,
                kernel_constraint=maxnorm(norm))(x)

        x = BatchNormalization(name="Significance_BN"+str(i+1))(x)
    output_sig = x

    #offset_network
    Input_off = Input(shape=input_shape_off, dtype='float32', name='input_off')
    name = "Offset_Conv_0"
    y = Conv1D(filters=dim-1,
            kernel_size=ks, padding='same',
            activation='linear', name=name,
            kernel_constraint=maxnorm(norm))(Input_off)

    output_off = keras.layers.add([y, Input_off], name='output_off')
    value = Permute((2, 1))(output_off)

    output_sig = Permute((2, 1))(output_sig)
    output_sig = TimeDistributed(Activation('softmax'), name='softmax')(output_sig)

    #Hn-1 =  𝝈(𝑺) ⨂(𝐨𝐟𝐟+𝒙𝑰)
    H1 = keras.layers.multiply(inputs=[output_sig, value], name='significancemerge')
    #Hn
    H2 = TimeDistributed(Dense(output_length, activation='linear', use_bias=False,
                                kernel_constraint=nonneg() if nonnegative else None),
                                name='out')(H1)
    main_output = Permute((2, 1), name='main_output')(H2)

    model = keras.models.Model(inputs=[Input_sig, Input_off], outputs=[main_output, output_off])
    model.compile(optimizer=keras.optimizers.Adam(lr=lr, clipnorm=clipnorm),
               loss={'main_output': 'mse', 'output_off': 'mse'},
               loss_weights={'main_output': 1., 'output_off': aux_weight})

    return model
Example #11
    def get_flow_model(self, P_init):
        s = kl.Input((self.num_states, ))
        Dense_W = kl.Dense(self.num_states,
                           use_bias=False,
                           W_constraint=nonneg(),
                           weights=[np.array(P_init)])
        logit = Dense_W(s)
        #P = kl.Activation('softmax')(logit)
        P = kl.Lambda(lambda z: z / K.sum(z, axis=-1, keepdims=True))(logit)

        model = km.Model(inputs=[s], outputs=P)

        #model.summary()
        model.compile(loss='categorical_crossentropy', optimizer='adam')
        return model, Dense_W
Example #12
def baseline_Model():
    model = Sequential()
    model.add(
        Dense(24, input_dim=24, kernel_initializer='normal',
              activation='relu'))
    #  ~model.add(Dense(25, input_dim=25, kernel_initializer='normal', activation='relu'))
    #  ~model.add(Dense(32, input_dim=32, kernel_initializer='normal', activation='relu'))
    #  ~model.add(Dense(34, input_dim=34, kernel_initializer='normal', activation='relu'))
    model.add(Dense(6, kernel_initializer='normal', activation='relu'))
    model.add(
        Dense(1,
              kernel_initializer='normal',
              activation='linear',
              W_constraint=nonneg())
    )  #Does not work if input is transformed onto [-1,1]
    #  ~model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
Example #13
def Dense_Model(params,inputs,lr=1e-4):
    import keras
    from keras.models import Sequential
    from keras.layers import Dense, Activation, Dropout
    from keras.wrappers.scikit_learn import KerasRegressor
    from keras.callbacks import EarlyStopping,ModelCheckpoint,LearningRateScheduler
    import tensorflow as tf
    from keras.constraints import nonneg
    # patience=10
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = params['Memory']
    session = tf.compat.v1.Session(config=config)
    model = Sequential()#'relu'
    NUM_GPU = 1 # or the number of GPUs available on your machine
    adam = keras.optimizers.Adam(lr = lr)
    gpu_list = []
    initializer = keras.initializers.glorot_uniform(seed=params['iteration'])
    # print(params['Save']['Weights'])
    for i in range(NUM_GPU): gpu_list.append('gpu(%d)' % i)
    if params['Loss'] == 'Boot_Loss':
        model.add(Dense(params['N'], input_dim=inputs,activation='relu',kernel_initializer=initializer,kernel_constraint=nonneg()))
        model.add(Dense(1,activation='elu',kernel_constraint=nonneg()))
        model.compile(loss=Boot_Loss, optimizer='adam')
    elif params['HiddenLayers']==1:
        model.add(Dense(params['N'], input_dim=inputs,activation=params['Activation'],kernel_initializer=initializer))
        # model.add(Dropout(0.1))
        # model.add(Dense(params['N'], input_dim=inputs,activation='sigmoid',kernel_initializer=initializer))#,kernel_constr
        model.add(Dense(1))
        # model.add(Dense(1,activation='elu',kernel_constraint=nonneg()))
        model.compile(loss=params['Loss'], optimizer='adam')
    else:
        model.add(Dense(params['N'], input_dim=inputs,activation=params['Activation'],kernel_initializer=initializer))
        model.add(Dropout(0.1))
        model.add(Dense(int(params['N']/2), activation=params['Activation']))
        model.add(Dense(1))
        model.compile(loss=params['Loss'], optimizer='adam')#,context=gpu_list) # - Add if using MXNET
    if params['Save']['Weights'] == True:
        # callbacks = [EarlyStopping(monitor='val_loss', patience=params['patience'],verbose=0),#params['Verbose']),
        #      ModelCheckpoint(filepath=params['Spath']+params['Sname']+str(params['iteration'])+'.h5', monitor='val_loss', save_best_only=True)]
        callbacks = [EarlyStopping(monitor='val_loss', patience=params['patience'],verbose=0),#params['Verbose']),
             ModelCheckpoint(filepath=params['Spath']+params['Sname']+str(params['iteration'])+'.h5', monitor='loss', save_best_only=True)]
    else:
        callbacks = [EarlyStopping(monitor='val_loss', patience=params['patience'])]
    return(model,callbacks)
Example #14
def build_model():

    model = Sequential()
    model.add(Dense(500, input_dim=122, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(200, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(100, activation='sigmoid'))
    model.add(Dropout(0.3))

    model.add(Dense(1, W_constraint=nonneg(), activation='sigmoid'))

    optimizers = []
    optimizers.append(SGD(lr=.1, momentum=0.1, decay=0.0))
    optimizers.append(RMSprop(lr=0.001, rho=0.9, epsilon=1e-06))
    optimizers.append(Adagrad(lr=0.01, epsilon=1e-06))
    optimizers.append(Adadelta(lr=1.0, rho=0.95, epsilon=1e-06))
    optimizers.append(
        Adam(lr=0.0001 / 2, beta_1=0.9, beta_2=0.999, epsilon=1e-08))
    optimizers.append(Adamax(lr=0.002, beta_1=0.9, beta_2=0.999,
                             epsilon=1e-08))

    model.compile(loss='mean_squared_error', optimizer=optimizers[4])
    return model
Example #15
    def get_bs_particle_graphs(self, s, Dense_W, V, R, temp):
        Dense_W(s)
        W = Dense_W.weights[0]

        Dense_W_ = kl.Dense(self.num_states,
                            use_bias=False,
                            W_constraint=nonneg())
        Dense_W_(s)
        W_ = Dense_W_.weights[0]

        s_embed = kl.Dense(self.num_states, activation='tanh')(s)
        Dense_E = kl.Dense(self.num_states * self.num_states)
        null_input = kl.Lambda(lambda z: 0 * z[:, 0:1])(s_embed)
        E_logit = kl.Dense(self.num_states, activation='tanh')(null_input)
        E_logit = Dense_E(E_logit)
        E_logit = kl.Lambda(lambda z: z / temp)(E_logit)
        E = kl.Activation('sigmoid')(E_logit)
        E = kl.Reshape((self.num_states, self.num_states))(E)

        E = kl.Lambda(lambda z: self.mask * z)(E)

        Dense_W.trainable = False
        logit_1 = Dense_W(s)

        logit_2 = kl.Lambda(lambda z: W_ * (z))(E)
        logit_2 = kl.Dot(axes=1)([logit_2, s])

        logit = kl.Add()([logit_1, logit_2])
        #P = kl.Activation('softmax')(logit)
        P = kl.Lambda(lambda z: z / K.sum(z, axis=-1, keepdims=True))(logit)
        V_ = kl.Dot(axes=-1)([P, V])

        r = kl.Dot(axes=-1)([s, R])

        v = kl.Lambda(lambda z: z[0] + self.gamma * z[1])([r, V_])
        return W, W_, E, P, V_, v
Example #16
encoder_input = Input(shape = X_train.shape[1:],name = 'encoder_input')
decoder_input = Input(shape = (None,latent_dim),name = 'decoder_input')
permute_conv = Permute((2,3,1))
conv1 = TimeDistributed(Conv1D(4, kernel_size =  1),input_shape = (X_train.shape[1],X_train.shape[2],X_train.shape[3]))
conv2 = TimeDistributed(Conv1D(2, kernel_size =  1),input_shape = (X_train.shape[1],X_train.shape[2],X_train.shape[3]))
conv3 = TimeDistributed(Conv1D(1, kernel_size =  1),input_shape = (X_train.shape[1],X_train.shape[2],X_train.shape[3]))
concat = Concatenate()
dropout =  Dropout(dropout_rate)
permute = Permute((2,1,3))
reshape = Reshape((look_back_period,X_train.shape[2]))
encoderLSTM = LSTM(units = latent_dim,return_state = True,return_sequences = True,name = 'enc_LSTM',dropout = dropout_rate)
encoderLSTM2 = LSTM(units = latent_dim,return_state = True,return_sequences = True,name = 'enc_LSTM2',dropout = dropout_rate)
decoderLSTM = LSTM(units = latent_dim,return_state = True,return_sequences = True,name = 'dec_LSTM',dropout = dropout_rate)
reshape2 = Reshape((look_back_period,X_train.shape[2]))
dense_output = TimeDistributed(Dense(X_train.shape[2], activation = 'relu', W_constraint=nonneg()),name = 'time_distirbuted_dense_output')
##
##
## encoder
conv1_out = conv1(encoder_input)
conv2_out = conv2(conv1_out)
conv3_out = conv3(conv2_out)

reshape_out_1 = reshape(conv3_out)

encoder_out =  encoderLSTM(reshape_out_1)
#encoder_out =  encoderLSTM2(encoder_out[0])
encoder_states = encoder_out[1:]
encoder_out = encoder_out[0]

Example #17

def test_nonneg():
    nonneg_instance = constraints.nonneg()
    normed = nonneg_instance(K.variable(example_array))
    assert(np.all(np.min(K.eval(normed), axis=1) == 0.))
Example #18
model.add(Convolution2D(40, 3, 3))
model.add(LeakyReLU())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
'''

model.add(Flatten())
model.add(Dense(256))
model.add(LeakyReLU())
'''
model.add(Dense(256))
model.add(LeakyReLU())
model.add(Dropout(0.50))
'''

model.add(Dense(1, W_constraint=nonneg()))
model.add(Activation('linear'))

model.compile(loss=rmse, optimizer='adam')

print("--- Compiling model: %s minutes ---" % round(((time.time() - start_time)/60),2))
start_time = time.time()

# Try to work around class imbalance, increases the training size a LOT
'''
df_train2 = pd.DataFrame()
df_train2 = df_train2.append(df_train.ix[np.repeat(df_train.index[(y_train >= 1.0) & (y_train < 1.5)].tolist(), 7)])
df_train2 = df_train2.append(df_train.ix[np.repeat(df_train.index[(y_train >= 1.5) & (y_train < 2.0)].tolist(), 5)])
df_train2 = df_train2.append(df_train.ix[np.repeat(df_train.index[(y_train >= 2.0) & (y_train < 2.5)].tolist(), 1)])
df_train2 = df_train2.append(df_train.ix[np.repeat(df_train.index[(y_train >= 2.5) & (y_train <= 3)].tolist(), 1)])
df_train = df_train2
Example #19
def get_pnet(inputs,
             features,
             genes,
             n_hidden_layers,
             direction,
             activation,
             activation_decision,
             w_reg,
             w_reg_outcomes,
             dropout,
             sparse,
             add_unk_genes,
             batch_normal,
             kernel_initializer,
             use_bias=False,
             shuffle_genes=False,
             attention=False,
             dropout_testing=False,
             non_neg=False,
             sparse_first_layer=True):
    feature_names = {}
    n_features = len(features)
    n_genes = len(genes)

    if not type(w_reg) == list:
        w_reg = [w_reg] * 10

    if not type(w_reg_outcomes) == list:
        w_reg_outcomes = [w_reg_outcomes] * 10

    if not type(dropout) == list:
        dropout = [dropout] * 10  # broadcast a scalar dropout rate to all layers

    w_reg0 = w_reg[0]
    w_reg_outcome0 = w_reg_outcomes[0]
    w_reg_outcome1 = w_reg_outcomes[1]
    reg_l = l2
    constraints = {}
    if non_neg:
        from keras.constraints import nonneg
        constraints = {'kernel_constraint': nonneg()}
        # constraints= {'kernel_constraint': nonneg(), 'bias_constraint':nonneg() }
    if sparse:
        if shuffle_genes == 'all':
            ones_ratio = float(n_features) / np.prod([n_genes, n_features])
            logging.info('ones_ratio random {}'.format(ones_ratio))
            mapp = np.random.choice([0, 1],
                                    size=[n_features, n_genes],
                                    p=[1 - ones_ratio, ones_ratio])
            layer1 = SparseTF(n_genes,
                              mapp,
                              activation=activation,
                              W_regularizer=reg_l(w_reg0),
                              name='h{}'.format(0),
                              kernel_initializer=kernel_initializer,
                              use_bias=use_bias,
                              **constraints)
            # layer1 = Diagonal(n_genes, input_shape=(n_features,), activation=activation, W_regularizer=l2(w_reg),
            #           use_bias=use_bias, name='h0', kernel_initializer= kernel_initializer )
        else:
            layer1 = Diagonal(n_genes,
                              input_shape=(n_features, ),
                              activation=activation,
                              W_regularizer=l2(w_reg0),
                              use_bias=use_bias,
                              name='h0',
                              kernel_initializer=kernel_initializer,
                              **constraints)

    else:
        if sparse_first_layer:
            #
            layer1 = Diagonal(n_genes,
                              input_shape=(n_features, ),
                              activation=activation,
                              W_regularizer=l2(w_reg0),
                              use_bias=use_bias,
                              name='h0',
                              kernel_initializer=kernel_initializer,
                              **constraints)
        else:
            layer1 = Dense(n_genes,
                           input_shape=(n_features, ),
                           activation=activation,
                           W_regularizer=l2(w_reg0),
                           use_bias=use_bias,
                           name='h0',
                           kernel_initializer=kernel_initializer)
    outcome = layer1(inputs)
    if attention:
        attention_probs = Diagonal(n_genes,
                                   input_shape=(n_features, ),
                                   activation='sigmoid',
                                   W_regularizer=l2(w_reg0),
                                   name='attention0')(inputs)
        outcome = multiply([outcome, attention_probs], name='attention_mul')

    decision_outcomes = []

    # if reg_outcomes:
    # decision_outcome = Dense(1, activation='linear', name='o_linear{}'.format(0), W_regularizer=reg_l(w_reg_outcome0), **constraints)(inputs)
    decision_outcome = Dense(1,
                             activation='linear',
                             name='o_linear{}'.format(0),
                             W_regularizer=reg_l(w_reg_outcome0))(inputs)
    # else:
    #     decision_outcome = Dense(1, activation='linear', name='o_linear{}'.format(0))(inputs)

    # testing
    if batch_normal:
        decision_outcome = BatchNormalization()(decision_outcome)

    # decision_outcome = Activation( activation=activation_decision, name='o{}'.format(0))(decision_outcome)

    # first outcome layer
    # decision_outcomes.append(decision_outcome)

    # if reg_outcomes:
    # decision_outcome = Dense(1, activation='linear', name='o_linear{}'.format(1), W_regularizer=reg_l(w_reg_outcome1/2.), **constraints)(outcome)
    decision_outcome = Dense(1,
                             activation='linear',
                             name='o_linear{}'.format(1),
                             W_regularizer=reg_l(w_reg_outcome1 / 2.))(outcome)
    # else:
    #     decision_outcome = Dense(1, activation='linear', name='o_linear{}'.format(1))(outcome)

    # drop2 = Dropout(dropout, name='dropout_{}'.format(0))
    drop2 = Dropout(dropout[0], name='dropout_{}'.format(0))

    outcome = drop2(outcome, training=dropout_testing)

    # testing
    if batch_normal:
        decision_outcome = BatchNormalization()(decision_outcome)

    decision_outcome = Activation(activation=activation_decision,
                                  name='o{}'.format(1))(decision_outcome)
    decision_outcomes.append(decision_outcome)

    if n_hidden_layers > 0:
        maps = get_layer_maps(genes, n_hidden_layers, direction, add_unk_genes)
        layer_inds = range(1, len(maps))
        # if adaptive_reg:
        #     w_regs = [float(w_reg)/float(i) for i in layer_inds]
        # else:
        #     w_regs = [w_reg] * len(maps)
        # if adaptive_dropout:
        #     dropouts = [float(dropout)/float(i) for i in layer_inds]
        # else:
        #     dropouts = [dropout]*len(maps)
        print('original dropout', dropout)
        print('dropout', layer_inds, dropout, w_reg)
        w_regs = w_reg[1:]
        w_reg_outcomes = w_reg_outcomes[1:]
        dropouts = dropout[1:]
        for i, mapp in enumerate(maps[0:-1]):
            w_reg = w_regs[i]
            w_reg_outcome = w_reg_outcomes[i]
            # dropout2 = dropouts[i]
            dropout = dropouts[1]
            names = mapp.index
            # names = list(mapp.index)
            mapp = mapp.values
            if shuffle_genes in ['all', 'pathways']:
                mapp = shuffle_genes_map(mapp)
            n_genes, n_pathways = mapp.shape
            logging.info('n_genes, n_pathways {} {} '.format(
                n_genes, n_pathways))
            # print 'map # ones {}'.format(np.sum(mapp))
            print('layer {}, dropout  {} w_reg {}'.format(i, dropout, w_reg))
            layer_name = 'h{}'.format(i + 1)
            if sparse:
                hidden_layer = SparseTF(n_pathways,
                                        mapp,
                                        activation=activation,
                                        W_regularizer=reg_l(w_reg),
                                        name=layer_name,
                                        kernel_initializer=kernel_initializer,
                                        use_bias=use_bias,
                                        **constraints)
            else:
                hidden_layer = Dense(n_pathways,
                                     activation=activation,
                                     W_regularizer=reg_l(w_reg),
                                     name=layer_name,
                                     kernel_initializer=kernel_initializer,
                                     **constraints)

            outcome = hidden_layer(outcome)

            if attention:
                attention_probs = Dense(n_pathways,
                                        activation='sigmoid',
                                        name='attention{}'.format(i + 1),
                                        W_regularizer=l2(w_reg))(outcome)
                outcome = multiply([outcome, attention_probs],
                                   name='attention_mul{}'.format(i + 1))

            # if reg_outcomes:
            # decision_outcome = Dense(1, activation='linear', name='o_linear{}'.format(i + 2), W_regularizer=reg_l( w_reg2/(2**i)))(outcome)
            # decision_outcome = Dense(1, activation='linear', name='o_linear{}'.format(i + 2), W_regularizer=reg_l( w_reg_outcome), **constraints)(outcome)
            decision_outcome = Dense(
                1,
                activation='linear',
                name='o_linear{}'.format(i + 2),
                W_regularizer=reg_l(w_reg_outcome))(outcome)
            # else:
            #     decision_outcome = Dense(1, activation='linear', name='o_linear{}'.format(i + 2))(outcome)
            # testing
            if batch_normal:
                decision_outcome = BatchNormalization()(decision_outcome)
            decision_outcome = Activation(
                activation=activation_decision,
                name='o{}'.format(i + 2))(decision_outcome)
            decision_outcomes.append(decision_outcome)
            drop2 = Dropout(dropout, name='dropout_{}'.format(i + 1))
            outcome = drop2(outcome, training=dropout_testing)

            feature_names['h{}'.format(i)] = names
            # feature_names.append(names)
        i = len(maps)
        feature_names['h{}'.format(i - 1)] = maps[-1].index
        # feature_names.append(maps[-1].index)
    return outcome, decision_outcomes, feature_names
Example #20
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

model = Sequential()
model.add(Dense(784, 20, W_constraint=maxnorm(1)))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Dense(20, 20, W_constraint=nonneg()))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Dense(20, 10, W_constraint=maxnorm(1)))
model.add(Activation('softmax'))

rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)

model.fit(X_train,
          Y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          show_accuracy=True,
          verbose=0)
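
Hedged aside: Dense(784, 20, ...) above uses the pre-1.0 Keras signature (input size, output size), and show_accuracy/nb_epoch were removed long ago. A sketch of the same stack in Keras 2 / tf.keras syntax (kernel_constraint plus the MaxNorm/NonNeg class names):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.constraints import MaxNorm, NonNeg

mnist_model = Sequential([
    Dense(20, input_dim=784, activation='relu', kernel_constraint=MaxNorm(1)),
    Dropout(0.1),
    Dense(20, activation='relu', kernel_constraint=NonNeg()),
    Dropout(0.1),
    Dense(10, activation='softmax', kernel_constraint=MaxNorm(1)),
])
mnist_model.compile(loss='categorical_crossentropy', optimizer='rmsprop')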
Example #21

labels=[]
for i in tmp:
    k=[0]*labelLen
    k[i]=1
    labels.append(k)

X_train=data
Y_train=labels


weights=readEmbedding()

leftmodel = Sequential(name='Embedding')
leftmodel.add(Embedding(input_length=maxlen,input_dim=vocabSize, output_dim=50, mask_zero= True, name='embedding',weights=weights, trainable=False))
rightmodel=Sequential(name='Importance')
rightmodel.add(Embedding(input_length=maxlen,input_dim=vocabSize, output_dim=1, mask_zero= True, name='importance', trainable=True, W_constraint=nonneg()))


model=Sequential()
model.add(Merge([leftmodel,rightmodel], mode=(lambda x: x[0]*K.repeat_elements(x[1],50,2)) , output_shape=(maxlen,50), name='merge'))
model.add(LSTM(output_dim=100, input_length=maxlen, activation='tanh', inner_activation='hard_sigmoid',return_sequences=False,name='lstm'))
model.add(Dense(labels[0].__len__()))
model.add(Activation('sigmoid'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['categorical_accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=0.001)

model.fit(x=[X_train,X_train], y=Y_train, batch_size=10, nb_epoch=50)
Example #22
ReLUThres = 0.01
TNS.add(ThresholdedReLU(theta = ReLUThres, input_shape=(nDict,)))
TNS.add(Dense(nDict, activation='linear', bias = True))
Z = TNS(W)
nLayers = 8
for l in range(nLayers-1):
    Y = merge([Z,W],"sum")
    Z = TNS(Y)
Y = merge([Z,W],"sum")
T = ThresholdedReLU(theta = ReLUThres)(Y)

Viso = Lambda(split_last, output_shape=split_last_output_shape)(T)
Vother = Lambda(split_others, output_shape=split_others_output_shape)(T)

normVother = Lambda(lambda x: (x+tau)/(x+tau).norm(1, axis = 1).reshape((x.shape[0],1)))(Vother)
VicK = Dense(2, W_constraint = nonneg(), activation='linear', bias = True)(normVother)
Kappa = Lambda(split_last, output_shape=split_last_output_shape)(VicK)
Vic = Lambda(split_others, output_shape=split_others_output_shape)(VicK)
OD = Lambda(lambda x: 2.0/np.pi*np.arctan(1.0/(x+tau)))(Kappa)

weight = [1.0,1.0,1.0]

epoch = 10
print "nLayers, ReLUThres, epoch, weight, nDict: ", nLayers, ReLUThres, epoch, weight, nDict

### fitting the model ###                    
print "Fitting"    

clf = Model(input=inputs,output=[Vic,OD,Viso])
clf.compile(optimizer=Adam(lr=0.0001), loss='mse', loss_weights = weight)
Example #23
File: PMEDN.py  Project: serenidpity/MEDN
TNS.add(ThresholdedReLU(theta=ReLUThres, input_shape=(nDict, )))
TNS.add(Dense(nDict, activation='linear', bias=True))
Z = TNS(W)
nLayers = 8
for l in range(nLayers - 1):
    Y = merge([Z, W], "sum")
    Z = TNS(Y)
Y = merge([Z, W], "sum")
T = ThresholdedReLU(theta=ReLUThres)(Y)

Viso = Lambda(split_last, output_shape=split_last_output_shape)(T)
Vother = Lambda(split_others, output_shape=split_others_output_shape)(T)

normVother = Lambda(lambda x: (x + tau) / (x + tau).norm(1, axis=1).reshape(
    (x.shape[0], 1)))(Vother)
VicK = Dense(2, W_constraint=nonneg(), activation='linear',
             bias=True)(normVother)
Kappa = Lambda(split_last, output_shape=split_last_output_shape)(VicK)
Vic = Lambda(split_others, output_shape=split_others_output_shape)(VicK)
OD = Lambda(lambda x: 2.0 / np.pi * np.arctan(1.0 / (x + tau)))(Kappa)

weight = [1.0, 1.0, 1.0]

epoch = 10
#epoch = 15
print("nLayers, ReLUThres, epoch, weight, nDict: %d %r %d %r %d" %
      (nLayers, ReLUThres, epoch, weight, nDict))

### fitting the model ###
print("Fitting")
Example #24
def get_model(args, input_dim, output_dim):
    """Build neural network layers
    If using pretrained autoencoder, assumes the first three hidden layers match

    Args:
        args: dictionary containing experiment parameters, see get_args()
        input_dim: int size of input vector
        output_dim: int size of output vector

    Returns: Keras Model

    """
    kc = None
    bc = None
    opt = get_opt(args['opt'])
    d0 = Dropout(args['drop'], seed=args['h1'])
    d1 = Dropout(args['drop'], seed=args['h2'])
    d2 = Dropout(args['drop'], seed=args['h3'])
    d3 = Dropout(args['drop'], seed=args['h4'])
    d4 = Dropout(args['drop'], seed=args['h5'])
    d5 = Dropout(args['drop'], seed=args['h5'])

    # build network
    input_genes = Input(shape=(input_dim, 1))  # add 3rd dimension for keras-vis
    e = Flatten()(input_genes)
    e = d0(e)
    e = Dense(args['h1'])(e)
    e = BatchNormalization()(e)
    e = Activation(args['act'])(e)
    e = d1(e)
    if args['h2'] > 0:
        e = Dense(args['h2'])(e)
        e = BatchNormalization()(e)
        e = Activation(args['act'])(e)
        e = d2(e)
    if args['h3'] > 0:
        e = Dense(args['h3'])(e)
        e = BatchNormalization()(e)
        e = Activation(args['act'])(e)
        e = d3(e)
    if args['h4'] > 0:
        e = Dense(args['h4'])(e)
        e = BatchNormalization()(e)
        e = Activation(args['act'])(e)
        e = d4(e)
    if args['h5'] > 0:
        e = Dense(args['h5'])(e)
        e = BatchNormalization()(e)
        e = Activation(args['act'])(e)
        e = d5(e)

    if args['predType'] == 'regression':
        metrics = ['mae', 'mape', r2]
        act = 'linear'
        if args['nonneg']:
            kc = nonneg()
            bc = nonneg()
    else:
        metrics = ['acc']
        if args['predType'] == 'multiClass':
            act = 'softmax'
        else:
            act = 'sigmoid'  # binary class

    predictor = Dense(output_dim, name='preds', kernel_constraint=kc, bias_constraint=bc)(e)
    predictor = Activation(act)(predictor)

    model = Model(input_genes, predictor)
    model.compile(loss=args['loss'], optimizer=opt, metrics=metrics)

    model.summary()

    if args['pretrain'] is not None:
        print(' set up ae')
        # set pre-trained weights from ae
        ae = load_model(args['ae_model'], custom_objects={'r2': r2})

        # just set the weights
        model.layers[3].set_weights(ae.layers[1].get_weights())  # 3 dropout layers
        model.layers[7].set_weights(ae.layers[4].get_weights())
        model.layers[11].set_weights(ae.layers[7].get_weights())

        if args['freeze'] == 1:
            # freeze layers
            print(' freeze weights')
            if args['num_ae_layers'] > 0:
                model.layers[3].trainable = False  # 3 dropout layers
            if args['num_ae_layers'] > 1:
                model.layers[7].trainable = False

        del ae

        model = Model(input_genes, predictor)
        model.compile(loss=args['loss'], optimizer=opt, metrics=metrics)
    return model
Example #25
    loss=dat-(tf.matmul(label,mu))
    loss=tf.trace(tf.matmul(tf.scalar_mul(1.0/sampleSize,loss),loss,transpose_b=True))


    norm = tf.reduce_sum(label, 1, keep_dims=True)
    norm = tf.scalar_mul( 100.0, tf.add(norm, minus1))
    norm = tf.matmul(norm,norm,transpose_a=True)
    loss= tf.add(loss,norm)

    loss=tf.reshape(loss,(1,1))
    return loss

opt=RMSprop(lr=0.01, rho=0.5, epsilon=1e-08, decay=0.0)
#cluster network
labelID=Input(batch_shape=(1,sampleSize))
labelEmbedding=Embedding(input_dim=sampleSize,  input_length=sampleSize, output_dim=alpha,W_constraint=kc.nonneg() ,name='embedding')(labelID)
rawInput=Input(batch_shape=(1,sampleSize*dim))
minus1Input=Input(batch_shape=(1,sampleSize))
#rawInput=Dense(input_dim=sampleSize*dim , output_dim=sampleSize* dim, weights=np.eye(sampleSize*dim),activation='linear', trainable=False)(rawInput)
out=merge(inputs=[rawInput,labelEmbedding,minus1Input], mode= LossFunction, output_shape=(1,1))
model=Model(input=[rawInput,labelID,minus1Input],output=out)
model.compile(optimizer=opt,
              loss='mse',
              metrics=['mse'])



X=X_train.reshape((1,sampleSize*dim))
X2=np.asanyarray(range(X_train.__len__())).reshape(1,sampleSize)
X3=np.asanyarray([-1.0]*sampleSize).reshape(1,sampleSize)
target=np.asanyarray([[[0.0]]])
Example #26
        x.append(i)

    pyplot.plot(x, inv_testY, x, inv_yhat)
    pyplot.show()

else:

    #Set network
    model = Sequential()
    model.add(
        LSTM(50,
             input_shape=(train_X.shape[1], train_X.shape[2]),
             return_sequences=True))
    model.add(Dropout(0.25))
    model.add(LSTM(20))
    model.add(Dense(1, activation='linear', kernel_constraint=nonneg()))

    model.compile(loss="mse", optimizer="Nadam", metrics=["mse"])
    history = model.fit(train_X,
                        train_y,
                        epochs=40,
                        batch_size=n_train_weeks,
                        validation_data=(test_X, test_y),
                        verbose=2,
                        shuffle=False)

    #Save model
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5