Example #1
def train_lstm_fusion(X_train, y_train, X_dev, y_dev, embedding_weights, reg=0.0, embed_glove=False):

    '''Trains an LSTM network using my recurrent attention layer,
    which is based on Cheng et al.'s deep attention fusion and on ideas from Section 3.1 of Luong et al. 2015 (http://arxiv.org/pdf/1508.04025v5.pdf)'''

    checkpointer = ModelCheckpoint(filepath="lstm_memfusion_best.hdf5", monitor='val_acc', verbose=1, save_best_only=True) # saves the weights with the best validation accuracy
    input_sentences = Input(shape=(max_sen_length,), dtype='int32')
    if embed_glove: # embed glove vectors
        x = Embedding(input_dim=vocab_size, output_dim=vocab_dim, input_length=max_sen_length, mask_zero=True, weights=[embedding_weights])(input_sentences)
    else: # or use random embedding
        x = Embedding(input_dim=vocab_size, output_dim=vocab_dim, input_length=max_sen_length, mask_zero=True)(input_sentences)
    dropout_x = Dropout(0.15)(x)
    lstm_out = LSTM(vocab_dim, dropout_U=0.25, return_sequences=True)(dropout_x)
    context = TDistSoftAttention(LSTMMem(vocab_dim // 2, dropout_U=0.25, return_mem=True))(lstm_out)
    # NOTE: the attention dimension needs to be twice that of LSTMMem for the r*cell_in operation to be valid
    attentional_hs = AttnFusion(vocab_dim, dropout_U=0.3, W_regularizer=l2(0.0), U_regularizer=l2(0.0), return_sequences=False)(context)
    attentional_hs = Highway(activity_regularizer=activity_l2(reg))(attentional_hs)
    prediction = Dense(nb_classes, activation='softmax', activity_regularizer=activity_l2(reg))(attentional_hs)
    history = LossHistory()
    val_history = ValLossHistory()
    acc = AccHistory()
    val_acc = ValAccHistory()
    model = Model(input=input_sentences, output=prediction)
    model.compile(optimizer='adadelta', loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(X_train, y_train, nb_epoch=40, batch_size=300, validation_data=(X_dev, y_dev), callbacks=[checkpointer, early_stop_val, history, val_history, acc, val_acc])
    pickle.dump(history.losses, open("lstm_memfusion_trainloss.p", "wb"))
    pickle.dump(val_history.losses, open("lstm_memfusion_devloss.p", "wb"))
    pickle.dump(acc.losses, open("lstm_memfusion_trainacc.p", "wb"))
    pickle.dump(val_acc.losses, open("lstm_memfusion_devacc.p", "wb"))
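The attention layers used above (TDistSoftAttention, LSTMMem, AttnFusion) are custom layers not shown in this example. For orientation only, here is a minimal numpy sketch of the dot-product variant of Luong et al.'s (2015, Section 3.1) global attention that such layers build on; all names and shapes are illustrative.

import numpy as np

def luong_dot_attention(hidden_states, query):
    # hidden_states: (timesteps, dim) encoder outputs; query: (dim,) decoder state
    scores = hidden_states.dot(query)          # dot-product scores, shape (timesteps,)
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                   # softmax over timesteps
    context = weights.dot(hidden_states)       # attention-weighted sum, shape (dim,)
    return context, weights

states = np.random.randn(7, 4)                 # 7 timesteps, hidden dim 4
context, attn = luong_dot_attention(states, np.random.randn(4))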
Example #2
def create_network():
	model = Sequential()
	#layer 1
	model.add(Convolution2D(10, 3, 3, input_shape=(1, PIXELS, PIXELS)))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	
	
	model.add(Convolution2D(15, 5, 5, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
	model.add(Activation('relu'))
	model.add(Dropout(0.2))
	
	model.add(Convolution2D(10, 3, 3, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
	model.add(Activation('relu'))
	model.add(Dropout(0.2))
	
	model.add(Flatten())
	model.add(Dense(512))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))

	#layer 7
	model.add(Dense(512 , W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))
	#layer 8
	model.add(Dense(10))
	model.add(Activation('softmax'))
	
	sgd = SGD(lr=0.01, decay=0.001, momentum=0.9, nesterov=False)
	#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
	model.compile(loss='categorical_crossentropy', optimizer=sgd)  # pass the SGD instance, not the 'sgd' string, so the settings above take effect
	return model
Example #3
def train_lstm_mem(X_train, y_train, X_dev, y_dev, embedding_weights, reg=0.0, embed_glove=False):

    '''Trains an LSTM network with simple attention, using ideas from Section 3.1 of Luong et al. 2015 (http://arxiv.org/pdf/1508.04025v5.pdf)'''

    checkpointer = ModelCheckpoint(filepath="lstm_mem_best.hdf5", monitor='val_acc', verbose=1, save_best_only=True) # saves the weights with the best validation accuracy
    input_sentences = Input(shape=(max_sen_length,), dtype='int32')
    if embed_glove: # embed glove vectors
        x = Embedding(input_dim=vocab_size, output_dim=vocab_dim, input_length=max_sen_length, mask_zero=True, weights=[embedding_weights])(input_sentences)
    else: # or use random embedding
        x = Embedding(input_dim=vocab_size, output_dim=vocab_dim, input_length=max_sen_length, mask_zero=True)(input_sentences)
    new_x = Dropout(0.3)(x)
    lstm_comp = LSTM(vocab_dim, dropout_U=0.3, return_sequences=True)
    contextconcat = SoftAttentionConcat(lstm_comp)(new_x)
    attentional_hs = Dense(25, activation='tanh', activity_regularizer=activity_l2(reg))(contextconcat)
    prediction = Dense(nb_classes, activation='softmax', activity_regularizer=activity_l2(reg))(attentional_hs)
    history = LossHistory()
    val_history = ValLossHistory()
    acc = AccHistory()
    val_acc = ValAccHistory()
    model = Model(input=input_sentences, output=prediction)
    model.compile(optimizer='adagrad', loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(X_train, y_train, nb_epoch=20, batch_size=300, validation_data=(X_dev, y_dev), callbacks=[checkpointer, early_stop_val, history, val_history, acc, val_acc])
    pickle.dump(history.losses, open("lstm_mem_trainloss.p", "wb"))
    pickle.dump(val_history.losses, open("lstm_mem_devloss.p", "wb"))
    pickle.dump(acc.losses, open("lstm_mem_trainacc.p", "wb"))
    pickle.dump(val_acc.losses, open("lstm_mem_devacc.p", "wb"))
Example #4
def build_model(x_dim):
    model = Sequential()
    # Dense(64) is a fully-connected layer with 64 hidden units.
    # In the first layer, you must specify the expected input data shape:
    # here, x_dim-dimensional vectors.
    model.add(Dense(output_dim=64, input_dim=x_dim, init='uniform',
                    W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
    model.add(Activation('linear'))
    model.add(Dropout(0.5))

    model.add(Dense(output_dim=64, input_dim=64, init='uniform',
                    W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
    model.add(Activation('linear'))
    # model.add(Dropout(0.5))

    model.add(Dense(output_dim=1, input_dim=64, init='uniform',
                    W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
    model.add(Activation("linear"))

    # model.add(Dense(output_dim=1, input_dim=x_dim, init='uniform'))
    # model.add(Activation("linear"))

    # model.add(Activation('softmax'))

    # sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='mean_squared_error',
                  class_mode='binary',
                  optimizer='rmsprop')

    return model
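A hypothetical usage sketch for build_model under Keras 1.x; the random arrays below are placeholders, not data from the original example.

import numpy as np

X = np.random.randn(128, 20).astype('float32')   # 128 samples of x_dim=20 features
y = np.random.randn(128, 1).astype('float32')
model = build_model(x_dim=20)
model.fit(X, y, nb_epoch=5, batch_size=32, verbose=0)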
Example #5
def run_model(max_moves=1214494, nb_epoch=2000, batch_size=100, epsilon=[1, 0.1], epsilon_rate=0.95, \
		lr=0.2, act1='relu', max_val1=20, act2='relu', max_val2=20, hid1=100, hid2=100, error="mse", reg_param=0.01, id=""):

	#ipdb.set_trace()
	t0 = time.time()
	model = Sequential()
	model.add(Flatten(input_shape=(nb_frames, grid_size)))
	#model.add(Dense(hid1, activation=act1, W_regularizer=l2(reg_param), activity_regularizer=activity_l2(reg_param)))
	#model.add(Dense(hid2, activation=act2, W_regularizer=l2(reg_param), activity_regularizer=activity_l2(reg_param)))
	model.add(Dense(hid1, W_regularizer=l2(reg_param), activity_regularizer=activity_l2(reg_param)))
	if act1 == 'clip':
		model.add(Activation('relu', max_value=max_val1))
	else: 
		model.add(Activation(act1))
	model.add(Dense(hid2, W_regularizer=l2(reg_param), activity_regularizer=activity_l2(reg_param)))
	if act2 == 'clip':
		model.add(Activation('relu', max_value=max_val2))
	else: 
		model.add(Activation(act2))
	model.add(Dense(4, W_regularizer=l2(reg_param), activity_regularizer=activity_l2(reg_param)))
	model.compile(sgd(lr=lr), error)

	model.summary()  # summary() prints itself and returns None, so don't wrap it in a print

	game = Bboxgame(max_moves=max_moves)
	agent = Agent(model=model)
	print "training"
	agent.train(game, batch_size=batch_size, nb_epoch=nb_epoch, epsilon=epsilon, epsilon_rate=epsilon_rate, id=id)
	#print "playing"
	#agent.play(game)
	t1 = time.time()

	sec = t1 - t0
	#print "sec: ", str(sec)
	hrs = int(sec / 3600)
	sec -= 3600*hrs
	print "hrs: ", str(hrs),
	#print "sec: ", str(sec)
	mins = int(sec / 60)
	sec -= 60*mins
	print " mins: ", str(mins),
	print " sec: ", str(sec)

	if isinstance(epsilon, (tuple, list)):
		log("{:^12}|{:^12}|{:^12.3f}{:^6.3f}{:^6.2f}|{:^10.2f}|{:^20}|{:>3.0f}:{:>02.0f}:{:>02.0f} |{:^6.2f}".format(\
			nb_epoch, batch_size, epsilon[0], epsilon[1], epsilon_rate, lr, \
			act1[:4] + '('+str(hid1)+')' + " + " + act2[:4] + '('+str(hid2)+')', \
			hrs, mins, sec, reg_param))
	else:
		log("{:^12}|{:^12}|{:^12.3f}{:^6.3}{:^6}|{:^10.2f}|{:^20}|{:>3.0f}:{:>02.0f}:{:>02.0f} |{:^6.2f}".format(\
			nb_epoch, batch_size, epsilon, "", "", lr, \
			act1[:4] + '('+str(hid1)+')' + " + " + act2[:4] + '('+str(hid2)+')', \
			hrs, mins, sec, reg_param))
Example #6
def create_CNN(
        CNN_filters,                        # # of filters
        CNN_rows,                           # # of rows per filter
        Dense_sizes,                        # matrix of intermediate Dense layers
        Dense_l2_regularizers,              # matrix with the l2 regularizers for the dense layers
        Dense_activity_l2_regularizers,     # matrix with the l2 activity regularizers for the dense layers
        embeddings,                         # pretrained embeddings or None if there are not any
        max_input_length,                   # maximum length of sentences
        is_trainable,                       # True if the embedding layer is trainable
        opt = 'sgd',                        # optimizer
        emb_size = 200,                      # embedding size if embeddings not given
        input_dim = 500                      # input dimension if embeddings not given
    ):
    out_dim = 5
    model = Sequential()
    if embeddings is not None:
        D = embeddings.shape[-1]
        cols = D
        model.add(Embedding( input_dim = embeddings.shape[0], output_dim=D, weights=[embeddings], trainable=is_trainable, input_length = max_input_length))
    else:
        D = emb_size
        cols = D
        model.add( Embedding( input_dim = input_dim, output_dim=D, trainable=True, input_length = max_input_length ) )
    model.add(Reshape((1, max_input_length, D)))
    model.add(Convolution2D( CNN_filters, CNN_rows, cols, dim_ordering='th', activation='sigmoid' ))
    sh = model.layers[-1].output_shape
    model.add(MaxPooling2D(pool_size=(sh[-2], sh[-1]),dim_ordering = 'th'))
    model.add(Flatten())
    for i in range(len(Dense_sizes)):
        Dense_size = Dense_sizes[i]
        l2r = Dense_l2_regularizers[i]
        l2ar = Dense_activity_l2_regularizers[i]
        model.add(
            Dense(
                Dense_size,
                activation = 'sigmoid',
                W_regularizer=l2(l2r),
                activity_regularizer=activity_l2(l2ar)
            )
        )
    l2r = Dense_l2_regularizers[-1]
    l2ar = Dense_activity_l2_regularizers[-1]
    model.add(
        Dense(
            out_dim,
            activation='linear',
            W_regularizer=l2(l2r),
            activity_regularizer=activity_l2(l2ar)
        )
    )
    model.compile(loss='mse', optimizer=opt)
    return model
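A hedged usage sketch for create_CNN; every value below is an illustrative placeholder. The regularizer lists carry one extra entry because the element at index -1 configures the output layer.

import numpy as np

emb = np.random.randn(5000, 200).astype('float32')        # placeholder embedding matrix
model = create_CNN(
    CNN_filters=64, CNN_rows=3,
    Dense_sizes=[128, 64],
    Dense_l2_regularizers=[0.01, 0.01, 0.001],            # last entry: output layer
    Dense_activity_l2_regularizers=[0.01, 0.01, 0.001],
    embeddings=emb, max_input_length=50, is_trainable=False)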
Example #7
    def __init__(self, graph, input_node, input_shape, forward_shapes, config):
        self.graph = graph
        self.input_node = input_node
        self.config = config

        self.input_shape = input_shape
        self.dim_ordering = config['dim_ordering']
        if self.dim_ordering == 'th':
            self.depth_axis = 2
            self.steps_axis = 3
        else:
            self.depth_axis = 3
            self.steps_axis = 2

        #TODO: from here
        self.initial_upsampling_size = config['googlenet_config']['output_pooling']['size']
        self.initial_upsampling_type = config['googlenet_config']['output_pooling']['type']


        self.W_regularizer = l2(config['W_regularizer_value'])
        self.b_regularizer = l2(config['b_regularizer_value'])
        self.activity_regularizer = activity_l2(config['activity_regularizer_value'])
        self.init = config['init']
        self.activator = Activation(config['decoder_activator'])

        output_name, output_shape = self.initial_upsampling()
        inception = TDBackwardsInception(self.graph, output_name,output_shape,forward_shapes, config)
        output_name,output_shape = inception.result
        self.result,self.output_shape = self.reverse_conv_layers(output_name,output_shape)
Example #8
def test_dense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.Dense,
               kwargs={'output_dim': 3},
               input_shape=(3, 2))

    layer_test(core.Dense,
               kwargs={'output_dim': 3},
               input_shape=(3, 4, 2))

    layer_test(core.Dense,
               kwargs={'output_dim': 3},
               input_shape=(None, None, 2))

    layer_test(core.Dense,
               kwargs={'output_dim': 3},
               input_shape=(3, 4, 5, 2))

    layer_test(core.Dense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2))
Example #9
def train_bilstm(X_train, y_train, X_dev, y_dev, embedding_weights, reg=0.0, embed_glove=False):

    '''Trains a vanilla bidirectional LSTM network'''

    checkpointer = ModelCheckpoint(filepath="bilstm_best.hdf5", monitor='val_acc', verbose=1, save_best_only=True) # saves the weights with the best validation accuracy
    input_sentences = Input(shape=(max_sen_length,), dtype='int32')
    if embed_glove: # embed glove vectors
        x = Embedding(input_dim=vocab_size, output_dim=vocab_dim, input_length=max_sen_length, mask_zero=True, weights=[embedding_weights])(input_sentences)
    else: # or use random embedding
        x = Embedding(input_dim=vocab_size, output_dim=vocab_dim, input_length=max_sen_length, mask_zero=True)(input_sentences)
    d = Dropout(0.3)(x)
    lstm_1 = LSTM(300, return_sequences=False, dropout_W=0.0, dropout_U=0.3)(d)
    lstm_2 = LSTM(300, return_sequences=False, go_backwards=True, dropout_W=0.0, dropout_U=0.3)(d)
    concat = merge([lstm_1, lstm_2], mode='concat')
    prediction = Dense(nb_classes, activation='softmax', activity_regularizer=activity_l2(reg))(concat)
    history = LossHistory()
    val_history = ValLossHistory()
    acc = AccHistory()
    val_acc = ValAccHistory()
    model = Model(input=input_sentences, output=prediction)
    model.compile(optimizer='adagrad', loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(X_train, y_train, nb_epoch=20, batch_size=300, validation_data=(X_dev, y_dev), callbacks=[checkpointer, early_stop_val, history, val_history, acc, val_acc])
    pickle.dump(history.losses, open("bilstm_trainloss.p", "wb"))
    pickle.dump(val_history.losses, open("bilstm_devloss.p", "wb"))
    pickle.dump(acc.losses, open("bilstm_trainacc.p", "wb"))
    pickle.dump(val_acc.losses, open("bilstm_devacc.p", "wb"))
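The explicit forward/backward LSTM pair merged by concat can also be written with the Bidirectional wrapper Keras gained in 1.1; a minimal sketch with placeholder sizes (50 for max_sen_length, 10000 for vocab_size, 300 for vocab_dim, 5 for nb_classes).

from keras.models import Model
from keras.layers import Input, Embedding, Dropout, LSTM, Dense
from keras.layers.wrappers import Bidirectional

inp = Input(shape=(50,), dtype='int32')
e = Embedding(input_dim=10000, output_dim=300, mask_zero=True)(inp)
h = Bidirectional(LSTM(300, dropout_U=0.3), merge_mode='concat')(Dropout(0.3)(e))
out = Dense(5, activation='softmax')(h)
model = Model(input=inp, output=out)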
Example #10
    def __init__(self,graph, input_node, input_shape, config):
        self.graph = graph
        self.input_node = input_node
        self.config = config

        self.input_shape = input_shape
        self.dim_ordering = config['dim_ordering']
        if self.dim_ordering == 'th':
            self.depth_axis = 2
            self.steps_axis = 3
        else:
            self.depth_axis = 3
            self.steps_axis = 2

        self.final_pool_size = config['googlenet_config']['output_pooling']['size']
        self.final_pool_type = config['googlenet_config']['output_pooling']['type']


        self.W_regularizer = l2(config['W_regularizer_value'])
        self.b_regularizer = l2(config['b_regularizer_value'])
        self.activity_regularizer = activity_l2(config['activity_regularizer_value'])
        self.init = config['init']
        if config['encoder_activator'] == 'prelu':
            self.activator = PReLU(init=self.init)
        # to try a different activator, specify it here
        else:
            self.activator = Activation(config['encoder_activator'])

        output_name,output_shape = self.first_conv_layers()
        inception = TDInception(self.graph, output_name,output_shape,config)
        output_name,output_shape = inception.result
        self.result, self.output_shape = self.final_pool(output_name, output_shape)
Example #11
    def __init__(self, graph, input_node, dim_ordering, output_num_channels, num_base_filters):
        # input should have the same dimensions as the output of the concatenation of the forward inception layer
        self.graph = graph
        self.input_node = input_node
        #output_num_channels should be the number of channels
        #that the original signal fed into the forward inception unit had
        self.output_num_channels = output_num_channels

        self.num_base_filters = num_base_filters

        assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
        self.dim_ordering = dim_ordering

        self.border_mode = 'same'
        self.W_regularizer = l2(0.01)
        self.b_regularizer = l2(0.01)
        self.activity_regularizer = activity_l2(0.01)
        self.W_constraint = None
        self.b_constraint = None
        self.init = 'glorot_uniform'
        self.activator = Activation('hard_sigmoid')

        self.split_inputs()
        left_branch = self.left_branch()
        left_center_branch = self.left_center_branch()
        right_center_branch = self.right_center_branch()
        right_branch = self.right_branch()
        #avg or sum or max?
        self.result = self.combine_branches(left_branch, left_center_branch,
                    right_center_branch, right_branch, 'sum')
Example #12
def test_A_reg():
    for reg in [regularizers.activity_l1(), regularizers.activity_l2()]:
        model = create_model(activity_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
        model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
Example #13
    def kerasfnn_regressor(self, testlen, ntrain):
        hsmadata = self.hsmadata
        dates = pd.Series(hsmadata['date'].unique()).sort_values()
        dates.index = range(0, len(dates))
        ntest = len(dates) // testlen

        hsma = pd.DataFrame()
        for i in range(ntrain, ntest):
            traindata = hsmadata[
                (hsmadata['date'] >= dates[(i - ntrain) * testlen])
                & (hsmadata['date'] < dates[i * testlen - self.day])].copy()
            testdata = hsmadata[(hsmadata['date'] >= dates[i * testlen]) & (
                hsmadata['date'] < dates[(i + 1) * testlen])].copy()
            print(dates[i * testlen])

            x_train = np.array(
                traindata.drop(['code', 'date', 'closeratio'], 1))
            y_train = np.array(traindata['closeratio'])

            x_test = np.array(testdata.drop(['code', 'date', 'closeratio'], 1))
            #y_test = np.array(testdata['closeratio'] > 0, dtype=np.int8)

            ###FNN model
            model = Sequential()
            model.add(
                Dense(32,
                      input_dim=x_train.shape[1],
                      W_regularizer=l2(0.01),
                      activity_regularizer=activity_l2(0.01)))
            model.add(Activation('relu'))  #relu
            model.add(Dropout(0.5))
            #model.add(Dense(16, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
            #model.add(Activation('tanh'))
            #model.add(Dropout(0.5))
            #model.add(Dense(8, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
            #model.add(Activation('tanh'))
            #model.add(Dropout(0.5))
            #model.add(Dense(16, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
            #model.add(Activation('tanh'))
            #model.add(Dropout(0.5))
            model.add(Dense(1))
            model.add(Activation('linear'))  #sigmoid tanh

            model.compile(optimizer='rmsprop',
                          loss='mean_squared_error',
                          metrics=['accuracy'])

            model.fit(x_train, y_train, nb_epoch=100, batch_size=100000)

            testdata['predratio'] = model.predict(x_test)

            hsma = pd.concat([hsma, testdata], ignore_index=True)

        return hsma
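The loop above is a walk-forward split: each iteration trains on the preceding ntrain windows (with a self.day gap before the test window) and tests on the next window. A tiny runnable illustration with made-up parameter values:

testlen, ntrain, day, ntest = 2, 3, 1, 5
for i in range(ntrain, ntest):
    train_lo, train_hi = (i - ntrain) * testlen, i * testlen - day
    test_lo, test_hi = i * testlen, (i + 1) * testlen
    print('iter %d: train dates[%d:%d), test dates[%d:%d)' % (i, train_lo, train_hi, test_lo, test_hi))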
Example #14
def test_A_reg():
    (X_train, Y_train), (X_test, Y_test), test_ids = get_data()
    for reg in [regularizers.activity_l1(), regularizers.activity_l2()]:
        model = create_model(activity_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.fit(X_train,
                  Y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  verbose=0)
        model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
Example #15
def get_model():
    model = Sequential()
    model.add(
        Convolution2D(64,
                      7,
                      7,
                      input_shape=(1, 80, 80),
                      init='glorot_normal',
                      border_mode='same',
                      activity_regularizer=activity_l2(0.0000001)))
    model.add(Activation('relu'))
    model.add(
        Convolution2D(64,
                      3,
                      3,
                      init='glorot_normal',
                      border_mode='same',
                      activity_regularizer=activity_l2(0.0000001)))
    model.add(Activation('relu'))
    model.add(
        Convolution2D(64,
                      3,
                      3,
                      init='glorot_normal',
                      border_mode='same',
                      activity_regularizer=activity_l2(0.0000001)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(256, activity_regularizer=activity_l2(0.0000001)))
    model.add(Activation('relu'))
    model.add(Dropout(.5))
    model.add(Dense(1024, activity_regularizer=activity_l2(0.0000001)))
    model.add(Activation('relu'))
    model.add(Dropout(.5))
    model.add(Dense(num_outputs))

    adam = Adam(lr=0.000001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='mse', optimizer=adam)

    return model
Example #16
 def build_model(self):
     lstm_branch = []
     for i in range(self.layers_number):
         model1 = Sequential()
         # model1.add(LSTM(self.lstm_hidden, input_dim=self.input_dim, return_sequences=False))
         model1.add(
             LSTM(self.lstm_hidden, input_shape=(self.lstm_timesteps, self.input_dim), return_sequences=False)
         )
         model2 = Sequential()
         # model2.add(Dense(output_dim=6, input_dim=6, activation='relu', W_regularizer=l2(0.001), activity_regularizer=activity_l2(0.001)))
         model2.add(Dense(output_dim=6, input_dim=6))
         model = Sequential()
         model.add(Merge([model1, model2], mode="concat"))
         model.add(
             Dense(
                 self.lstm_hidden + 6,
                 input_dim=self.lstm_hidden + 6,
                 activation="relu",
                 W_regularizer=l2(0.001),
                 activity_regularizer=activity_l2(0.001),
             )
         )
         plot(model, to_file="model_lstm.png", show_shapes=True)
         lstm_branch.append(model)
     # mode='concat' is required: the final Dense below assumes input_dim=(lstm_hidden + 6) * 3
     merged = Merge(lstm_branch, mode='concat')
     final_model = Sequential()
     final_model.add(merged)
     final_model.add(
         Dense(
             self.output_dim,
             input_dim=(self.lstm_hidden + 6) * 3,
             activation="linear",
             W_regularizer=l2(0.001),
             activity_regularizer=activity_l2(0.001),
         )
     )
     # final_model.add(Activation('linear'))
     self.model = final_model
     self.model.compile(loss="mean_squared_error", optimizer="adagrad")
     plot(self.model, to_file="model.png", show_shapes=True)
Example #17
def update_base_outputs(model_base, output_shape, optparam, hidden_type='fc'):
    from keras.models import Model as KerasModel, Sequential
    from keras.layers import Dense, Input
    from keras.regularizers import l2 as activity_l2  # Keras 2 dropped activity_l2; plain l2 serves as the activity regularizer here
    from keras.constraints import max_norm as max_norm_constraint

    n_hidden, n_classes = output_shape
    print('output_shape: "%s"' % str(output_shape))
    print('Adding %d x %d relu hidden + softmax output layer' %
          (n_hidden, n_classes))

    obj_lambda2 = optparam.get('obj_lambda2', 0.0025)
    obj_param = dict(activity_regularizer=activity_l2(obj_lambda2))

    max_norm = optparam.get('max_norm', np.inf)
    if max_norm != np.inf:
        obj_param['kernel_constraint'] = max_norm_constraint(max_norm)

    model_input_shape = model_base.layers[0].input_shape[0]
    print('model_input_shape=%s' % str(model_input_shape))

    if hidden_type == 'fc':
        hidden_layer = Dense(n_hidden, activation='relu')
    elif hidden_type == 'none':
        hidden_layer = None
    else:
        print('Unknown hidden_type "%s", using "fc"' % hidden_type)
        hidden_layer = Dense(n_hidden, activation='relu')

    output_layer = Dense(n_classes, activation='softmax', **obj_param)

    mclass = model_base.__class__.__name__
    if 1 or mclass == 'Sequential':  # the functional-API branch below is intentionally disabled
        print("Using Sequential model")
        model = Sequential()
        model.add(model_base)
        if hidden_layer:
            model.add(hidden_layer)
        model.add(output_layer)
    else:
        print("Using functional API")
        inputs = model_base.layers[0].get_input_at(0)
        outputs = model_base.layers[-1].get_output_at(0)
        if hidden_layer:
            outputs = hidden_layer(outputs)
        preds = output_layer(outputs)
        model = KerasModel(inputs=inputs, outputs=preds)

    model.n_base_layers = len(model_base.layers)
    model.n_top_layers = len(model.layers) - model.n_base_layers

    return model
Example #18
def get_model():
    model = Sequential()
    model.add(Convolution2D(64, 7, 7, input_shape=(1,80,80), init='glorot_normal', border_mode='same', activity_regularizer=activity_l2(0.0000001)))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init='glorot_normal', border_mode='same', activity_regularizer=activity_l2(0.0000001)))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init='glorot_normal', border_mode='same', activity_regularizer=activity_l2(0.0000001)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2,2)))
    model.add(Flatten())
    model.add(Dense(256, activity_regularizer=activity_l2(0.0000001)))
    model.add(Activation('relu'))
    model.add(Dropout(.5))
    model.add(Dense(1024, activity_regularizer=activity_l2(0.0000001)))
    model.add(Activation('relu'))
    model.add(Dropout(.5))
    model.add(Dense(num_outputs))

    adam = Adam(lr=0.000001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='mse', optimizer=adam)

    return model
Example #19
def test_cosinedense():
    from keras import regularizers
    from keras import constraints
    from keras.models import Sequential

    layer_test(core.CosineDense,
               kwargs={'output_dim': 3},
               input_shape=(3, 2))

    layer_test(core.CosineDense,
               kwargs={'output_dim': 3},
               input_shape=(3, 4, 2))

    layer_test(core.CosineDense,
               kwargs={'output_dim': 3},
               input_shape=(None, None, 2))

    layer_test(core.CosineDense,
               kwargs={'output_dim': 3},
               input_shape=(3, 4, 5, 2))

    layer_test(core.CosineDense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2))

    X = np.random.randn(1, 20)
    model = Sequential()
    model.add(core.CosineDense(1, bias=True, input_shape=(20,)))
    model.compile(loss='mse', optimizer='rmsprop')
    W = model.get_weights()
    W[0] = X.T
    W[1] = np.asarray([1.])
    model.set_weights(W)
    out = model.predict(X)
    assert_allclose(out, np.ones((1, 1), dtype=K.floatx()), atol=1e-5)

    X = np.random.randn(1, 20)
    model = Sequential()
    model.add(core.CosineDense(1, bias=False, input_shape=(20,)))
    model.compile(loss='mse', optimizer='rmsprop')
    W = model.get_weights()
    W[0] = -2 * X.T
    model.set_weights(W)
    out = model.predict(X)
    assert_allclose(out, -np.ones((1, 1), dtype=K.floatx()), atol=1e-5)
Example #20
def test_highway():
    from keras import regularizers
    from keras import constraints

    layer_test(core.Highway,
               kwargs={},
               input_shape=(3, 2))

    layer_test(core.Highway,
               kwargs={'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2))
Example #21
def test_highway():
    from keras import regularizers
    from keras import constraints

    layer_test(core.Highway, kwargs={}, input_shape=(3, 2))

    layer_test(core.Highway,
               kwargs={
                   'W_regularizer': regularizers.l2(0.01),
                   'b_regularizer': regularizers.l1(0.01),
                   'activity_regularizer': regularizers.activity_l2(0.01),
                   'W_constraint': constraints.MaxNorm(1),
                   'b_constraint': constraints.MaxNorm(1)
               },
               input_shape=(3, 2))
Example #22
 def build_model(self):
     lstm_branch = []
     for i in range(self.layers_number):
         model1 = Sequential()
         #model1.add(LSTM(self.lstm_hidden, input_dim=self.input_dim, return_sequences=False))
         model1.add(
             LSTM(self.lstm_hidden,
                  input_shape=(self.lstm_timesteps, self.input_dim),
                  return_sequences=False))
         model2 = Sequential()
         #model2.add(Dense(output_dim=6, input_dim=6, activation='relu', W_regularizer=l2(0.001), activity_regularizer=activity_l2(0.001)))
         model2.add(Dense(output_dim=6, input_dim=6))
         model = Sequential()
         model.add(Merge([model1, model2], mode='concat'))
         model.add(
             Dense(self.lstm_hidden + 6,
                   input_dim=self.lstm_hidden + 6,
                   activation='relu',
                   W_regularizer=l2(0.001),
                   activity_regularizer=activity_l2(0.001)))
         plot(model, to_file='model_lstm.png', show_shapes=True)
         lstm_branch.append(model)
     # mode='concat' is required: the final Dense below assumes input_dim=(lstm_hidden + 6) * 3
     merged = Merge(lstm_branch, mode='concat')
     final_model = Sequential()
     final_model.add(merged)
     final_model.add(
         Dense(self.output_dim,
               input_dim=(self.lstm_hidden + 6) * 3,
               activation='linear',
               W_regularizer=l2(0.001),
               activity_regularizer=activity_l2(0.001)))
     #final_model.add(Activation('linear'))
     self.model = final_model
     self.model.compile(loss="mean_squared_error", optimizer="adagrad")
     plot(self.model, to_file='model.png', show_shapes=True)
Example #23
def test_maxout_dense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.MaxoutDense, kwargs={'output_dim': 3}, input_shape=(3, 2))

    layer_test(core.MaxoutDense,
               kwargs={
                   'output_dim': 3,
                   'W_regularizer': regularizers.l2(0.01),
                   'b_regularizer': regularizers.l1(0.01),
                   'activity_regularizer': regularizers.activity_l2(0.01),
                   'W_constraint': constraints.MaxNorm(1),
                   'b_constraint': constraints.MaxNorm(1)
               },
               input_shape=(3, 2))
Example #24
def test_timedistributeddense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 2, 'input_length': 2},
               input_shape=(3, 2, 3))

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2, 3))
Example #25
def test_timedistributeddense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 2, 'input_length': 2},
               input_shape=(3, 2, 3))

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2, 3))
Example #26
	def __init__(self, dim_in, encoding_dim, sparsity):
		input_img = Input(shape=(dim_in,))

		regularizer = regularizers.activity_l2(sparsity)
		encoded = Dense(encoding_dim, activation='relu', activity_regularizer=regularizer)(input_img)

		decoded = Dense(dim_in, activation='sigmoid')(encoded)

		self.autoencoder = Model(input=input_img, output=decoded)

		self.encoder = Model(input=input_img, output=encoded)

		encoded_input = Input(shape=(encoding_dim,))
		decoder_layer = self.autoencoder.layers[-1]
		self.decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))

		self.autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
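For context on why activity_l2 nudges the encoding toward sparsity: it adds a penalty on the encoder's activations to the training loss. A rough numpy rendering of the Keras 1 behaviour (mean over the batch, then sum over units); the exact reduction may differ between versions.

import numpy as np

def activity_l2_penalty(activations, sparsity):
    # activations: (batch, encoding_dim) encoder outputs for one batch
    return sparsity * np.sum(np.mean(np.square(activations), axis=0))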
Example #27
def test_maxout_dense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.MaxoutDense, kwargs={"output_dim": 3}, input_shape=(3, 2))

    layer_test(
        core.MaxoutDense,
        kwargs={
            "output_dim": 3,
            "W_regularizer": regularizers.l2(0.01),
            "b_regularizer": regularizers.l1(0.01),
            "activity_regularizer": regularizers.activity_l2(0.01),
            "W_constraint": constraints.MaxNorm(1),
            "b_constraint": constraints.MaxNorm(1),
        },
        input_shape=(3, 2),
    )
Example #28
def update_base_outputs(base_model, **kwargs):
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.regularizers import l2 as activity_l2
    nb_hidden = kwargs.pop('nb_hidden', 1024)
    nb_classes = kwargs.pop('nb_classes', 2)
    obj_lambda2 = kwargs.pop('obj_lambda2', 0.0025)

    print('Adding %d x %d relu hidden + softmax output layer' %
          (nb_hidden, nb_classes))
    model = Sequential()
    model.add(base_model)
    model.add(Dense(nb_hidden, activation='relu'))
    model.add(
        Dense(nb_classes,
              activity_regularizer=activity_l2(obj_lambda2),
              activation='softmax'))
    return model
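A hypothetical usage sketch; the toy base below stands in for whatever pretrained model update_base_outputs is meant to extend.

from keras.models import Sequential
from keras.layers import Dense

base = Sequential()
base.add(Dense(64, activation='relu', input_dim=100))   # toy stand-in for the real base
clf = update_base_outputs(base, nb_hidden=128, nb_classes=2)
clf.compile(optimizer='adam', loss='categorical_crossentropy')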
Example #29
def create_model2(input_dim,
                  h1_unit=16,
                  h2_unit=8,
                  optimizer="adagrad",
                  init="normal"):
    # create model
    model = Sequential()
    model.add(Dense(h1_unit, init=init, input_dim=input_dim,
                    activation="relu"))  #sigmoid
    model.add(Dense(h2_unit, init=init, activation="relu"))
    model.add(
        Dense(1,
              init=init,
              activation='linear',
              activity_regularizer=activity_l2(0.01)))
    # Compile model
    model.compile(loss='mse', optimizer=optimizer)
    return model
Example #30
def test_timedistributeddense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.TimeDistributedDense, kwargs={"output_dim": 2, "input_length": 2}, input_shape=(3, 2, 3))

    layer_test(
        core.TimeDistributedDense,
        kwargs={
            "output_dim": 3,
            "W_regularizer": regularizers.l2(0.01),
            "b_regularizer": regularizers.l1(0.01),
            "activity_regularizer": regularizers.activity_l2(0.01),
            "W_constraint": constraints.MaxNorm(1),
            "b_constraint": constraints.MaxNorm(1),
        },
        input_shape=(3, 2, 3),
    )
Example #31
def create_model(input_dim,
                 h1_unit=16,
                 optimizer="adagrad",
                 init="normal",
                 h1_activation="relu"):
    # create model
    model = Sequential()
    model.add(
        Dense(h1_unit,
              init=init,
              input_dim=input_dim,
              activation=h1_activation))  #sigmoid
    model.add(
        Dense(1,
              init=init,
              activation='linear',
              activity_regularizer=activity_l2(0.01)))
    # Compile model
    model.compile(loss=loss_function, optimizer=optimizer)
    return model
Example #32
    def __init__(self, graph, input_node, input_shape, config):
        self.graph = graph
        self.input_node = input_node
        self.input_shape = input_shape
        self.config= config

        self.dim_ordering = config['dim_ordering']
        assert self.dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'

        self.W_regularizer = l2(config['W_regularizer_value'])
        self.b_regularizer = l2(config['b_regularizer_value'])
        self.activity_regularizer = activity_l2(config['activity_regularizer_value'])
        self.init = config['init']
        if config['encoder_activator'] == 'prelu':
            self.activator = PReLU(init=self.init)
        # to try a different activator, specify it here
        else:
            self.activator = Activation(config['encoder_activator'])
        self.inception_config = config['googlenet_config']['inception_config']
        self.result,self.output_shape = self.walk_config_graph()
Example #33
    def __init__(self, graph, input_node, input_shape, config):
        self.graph = graph
        self.input_node = input_node
        self.input_shape = input_shape
        self.config = config

        self.dim_ordering = config["dim_ordering"]
        assert self.dim_ordering in {"tf", "th"}, "dim_ordering must be in {tf, th}"

        self.W_regularizer = l2(config["W_regularizer_value"])
        self.b_regularizer = l2(config["b_regularizer_value"])
        self.activity_regularizer = activity_l2(config["activity_regularizer_value"])
        self.init = config["init"]
        if config["encoder_activator"] == "prelu":
            self.activator = PReLU(init=self.init)
        # to try a different activator, specify it here
        else:
            self.activator = Activation(config["encoder_activator"])
        self.inception_config = config["googlenet_config"]["inception_config"]
        self.result, self.output_shape = self.walk_config_graph()
Example #34
def model_1_1(input_shape, nb_classes):
    model = Sequential()

    model.add(
        Convolution2D(16, 3, 3, border_mode='same', input_shape=input_shape))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(16, 3, 3, border_mode='same'))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Convolution2D(32, 3, 3, border_mode='same'))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, border_mode='same'))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dropout(0.65))
    model.add(
        Dense(96,
              W_regularizer=l2(0.00005),
              activity_regularizer=activity_l2(0.00005)))
    model.add(Activation('relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    print "MODEL 1_1"
    return model, "MODEL 1_1"
Example #35
    def __init__(self, dim_in, encoding_dim, sparsity):
        input_img = Input(shape=(dim_in, ))

        regulizer = regularizers.activity_l2(sparsity)
        encoded = Dense(encoding_dim,
                        activation='relu',
                        activity_regularizer=regulizer)(input_img)

        decoded = Dense(dim_in, activation='sigmoid')(encoded)

        self.autoencoder = Model(input=input_img, output=decoded)

        self.encoder = Model(input=input_img, output=encoded)

        encoded_input = Input(shape=(encoding_dim, ))
        decoder_layer = self.autoencoder.layers[-1]
        self.decoder = Model(input=encoded_input,
                             output=decoder_layer(encoded_input))

        self.autoencoder.compile(optimizer='adadelta',
                                 loss='binary_crossentropy')
Example #36
def create_model(layer):
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
    embedded_sequences = layer(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedded_sequences)
    x = MaxPooling1D(2)(x)
    x = Dropout(0.5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(4)(x)
    x = Dropout(0.5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(7)(x)
    x = Dropout(0.5)(x)
    x = Flatten()(x)
    x = Dropout(0.25)(x)
    x = Dense(128,
              input_dim=128,
              W_regularizer=l2(0.01),
              activity_regularizer=activity_l2(0.01))(x)
    x = Dropout(0.25)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.2)(x)
    preds = Dense(len(labels_index), activation='softmax')(x)
    return Model(sequence_input, preds)
Example #37
    def define_model(self, input):
        print len(input)
        model = Sequential([
            Dropout(0.1, input_shape=(len(input[0]), )),
            Dense(
                140,
                W_regularizer=l2(0.01),
                activity_regularizer=activity_l2(0.01),
            ),
            LeakyReLU(alpha=0.2),
            Dense(32),
            LeakyReLU(alpha=0.2),
            Dense(32),
            LeakyReLU(alpha=0.2),
            Dense(32),
            LeakyReLU(alpha=0.2),
            Dense(32),
            LeakyReLU(alpha=0.2),
            Dense(32),
            LeakyReLU(alpha=0.2),
            Dense(32),
            LeakyReLU(alpha=0.2),
            Dense(2),
            Activation("softmax"),
        ])

        adam = Adam(lr=0.001,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=1e-08,
                    decay=0.0)

        model.compile(loss='binary_crossentropy',
                      optimizer=adam,
                      metrics=['accuracy'])

        return model
Example #38
 def denoisingAutoencoder(self, noise, deep_size):
     entity_vectors = np.asarray(dt.import2dArray(self.vector_path))
     if len(entity_vectors) != 15000:
         entity_vectors = entity_vectors.transpose()
     if self.class_path is None:
         entity_classes = entity_vectors
     else:
         entity_classes = np.asarray(dt.import2dArray(self.class_path))
     input_size = len(entity_vectors[0])
     output_size = len(entity_classes[0])
     if self.dropout_noise is None:
         self.model.add(GaussianNoise(noise, input_shape=(input_size, )))
     else:
         self.model.add(
             Dropout(self.dropout_noise[0], input_shape=(input_size, )))
     if deep_size is not None:
         self.model.add(
             Dense(output_dim=deep_size,
                   input_dim=self.hidden_layer_size,
                   init=self.layer_init,
                   activation=self.hidden_activation,
                   W_regularizer=l2(self.reg),
                   activity_regularizer=activity_l2(self.activity_reg)))
     self.model.add(
         Dense(output_dim=self.hidden_layer_size,
               input_dim=input_size,
               init=self.layer_init,
               activation=self.hidden_activation,
               W_regularizer=l2(self.reg)))
     self.model.add(
         Dense(output_dim=output_size,
               init=self.layer_init,
               activation=self.output_activation,
               W_regularizer=l2(self.reg)))
     self.model.compile(loss=self.loss, optimizer=self.optimizer)
     return entity_vectors, entity_classes
Example #39
def learn_mlp(X_train,
              y_train,
              X_test,
              y_test,
              nhidden=10,
              n_neurons=100,
              nepochs=200):
    model = Sequential()
    # Initial layer
    model.add(Dense(n_neurons, input_dim=1, activation='relu'))
    # Creating nhidden number of layers
    for i in range(nhidden):
        model.add(
            Dense(n_neurons,
                  activation='relu',
                  W_regularizer=l2(0.01),
                  activity_regularizer=activity_l2(0.01)))

    model.add(Dense(1))
    adam = keras.optimizers.Adam(lr=0.01,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08)
    model.compile(loss='mse', optimizer=adam)
    early_stopping = EarlyStopping(monitor='val_loss', patience=5)
    model.fit(X_train,
              y_train,
              nb_epoch=nepochs,
              batch_size=50,
              validation_data=(X_test, y_test),
              callbacks=[early_stopping],
              verbose=0)

    yprd_tstnn = model.predict(X_test)[:, 0]
    errnn = yprd_tstnn - y_test
    return yprd_tstnn, errnn
Example #40
 def build_model(self):
     lstm_branch = []
     input_branch = []
     main_input = Input(shape=(self.lstm_timesteps, self.input_dim),
             name='main_input')
     input_branch.append(main_input)
     lstm_out = LSTM(self.lstm_hidden, return_sequences=True)(main_input)
     auxiliary_input = Input(shape=(1,self.lstm_timesteps), name='auxiliary_input')
     input_branch.append(auxiliary_input)
     x1 = merge([auxiliary_input, lstm_out], mode='dot', dot_axes=[2,1], name='merge_lstm_auxi')
     flatten = Reshape((self.lstm_hidden,))(x1)
     c_input = Input(shape=(6,), name='c_input')
     input_branch.append(c_input)
     x2 = merge([flatten, c_input], mode='concat') 
     lstm_out = Dense(self.lstm_hidden, activation='relu', W_regularizer=l2(0.001),
         activity_regularizer=activity_l2(0.001), name="lstm_out")(x2)
     '''
     lstm_out_1 = Dense(self.lstm_hidden, activation='relu', W_regularizer=l2(0.001),
         activity_regularizer=activity_l2(0.001), name="lstm_out_1")(lstm_out)
     '''
     final_loss = Dense(self.output_dim, name='main_output')(lstm_out)
     self.model = Model(input_branch, output=final_loss)
     self.model.compile(loss="mean_squared_error", optimizer="rmsprop")
     plot(self.model, to_file='multiple_model_one.png', show_shapes=True)
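What the mode='dot' merge above computes, sketched in numpy: auxiliary_input supplies per-timestep weights, and the dot over axes [2, 1] takes a weighted sum of the LSTM's per-timestep outputs. Shapes below are illustrative.

import numpy as np

aux = np.random.rand(1, 1, 10)          # (batch, 1, lstm_timesteps) weights
lstm_out = np.random.randn(1, 10, 8)    # (batch, lstm_timesteps, lstm_hidden)
weighted = np.einsum('bit,bth->bih', aux, lstm_out)   # -> (batch, 1, lstm_hidden)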
Example #41
model = Sequential()
# Normalize data
model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(160, 320, 3)))
# Crop top 70 pixels and bottom 20 pixels of image
model.add(Cropping2D(cropping=((70, 25), (0, 0))))
# Convolutional Layers
model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='relu'))
model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))
model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(Convolution2D(64, 3, 3, activation='relu'))
# Flatten points
model.add(Flatten())
# 5 Densely connected layers
model.add(
    Dense(100, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
model.add(
    Dense(50, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
model.add(Dense(10))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')

# double samples_per_epoch and nb_val_samples because of augmented data.
model.fit_generator(train_generator,
                    samples_per_epoch=len(train_samples) * 2,
                    validation_data=validation_generator,
                    nb_val_samples=len(validation_samples) * 2,
                    nb_epoch=3,
                    verbose=1)
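train_generator and validation_generator are not defined in this example. A hedged sketch of the kind of generator the doubled samples_per_epoch implies: each sample is yielded twice, once as-is and once horizontally flipped with a negated steering angle. The (image, angle) pair format is an assumption.

import numpy as np
from sklearn.utils import shuffle

def generator(samples, batch_size=32):
    # samples: list of (image, steering_angle) pairs, images already loaded as arrays
    while True:
        samples = shuffle(samples)
        for offset in range(0, len(samples), batch_size):
            images, angles = [], []
            for image, angle in samples[offset:offset + batch_size]:
                images.append(image)
                angles.append(angle)
                images.append(np.fliplr(image))   # augmentation: mirror the image...
                angles.append(-angle)             # ...and negate the steering angle
            yield np.array(images), np.array(angles)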
Example #42
print('Data Load, test, train complete')
model = Sequential()
#Convolutional Layer
model.add(Convolution2D(10, 3, 3, border_mode='valid', input_shape=(5, 23, 23)))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(10, 3, 3))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(32))
model.add(Activation('tanh'))
model.add(Dropout(0.25))

model.add(Dense(1, input_dim=32, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
model.add(Activation('linear'))

sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_absolute_error', optimizer=sgd)  # pass the SGD instance, not the 'sgd' string, so the settings above take effect
model.fit(X_train, y_train, batch_size=1000, nb_epoch=20, show_accuracy=True)
train_vals = model.predict(X_train, batch_size=1000)
test_vals = model.predict(X_test, batch_size=1000)
#print(type(train_vals))
#print(test_vals)
np.savetxt('RAIN_Train_five_layer2_3.csv',np.array(train_vals))
np.savetxt('RAIN_Test_five_layer2_3.csv',np.array(test_vals))
np.savetxt('Y_test_five_layer2_3.csv',y_test)
np.savetxt('Y_train_five_layer2_3.csv',y_train)
#score=model.evaluate(X_test,y_test,batch_size=100,show_accuracy=True)	
#print(score)
Example #43
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from sklearn.preprocessing import StandardScaler
from keras.regularizers import l2, activity_l2
from scipy.interpolate import spline

reg=0.02
reg2=0.02

model = Sequential()
model.add(Dense(4, input_dim=2, init='uniform', W_regularizer=l2(reg), activity_regularizer=activity_l2(reg2)))
model.add(Dense(1, init='uniform', W_regularizer=l2(reg), activity_regularizer=activity_l2(reg2)))
sgd = SGD(lr=0.06, decay=2e-5, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['mean_absolute_error'])

aa = pd.read_csv('GameR.csv', sep=',', header=0)
df = aa[0:2100]
y = np.array(df[[2]])
y_train = np.array([item for sublist in y for item in sublist])

x = np.array(df)
x1 = x.T
x2 = [x1[0], x1[1]]
Example #44
def keras_model():
    from keras.models import Sequential
    from keras.layers.core import Dense
    from keras.regularizers import l2, activity_l2
    from hyperopt import STATUS_OK
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from aiding_funcs.label_handling import MaxMin, MaxMinFit
    import pickle
    print('loading test.p')
    test = pickle.load( open( "/data/dpappas/Common_Crawl_840B_tokkens_pickles/test.p", "rb" ) )
    print('loading train.p')
    train = pickle.load( open( "/data/dpappas/Common_Crawl_840B_tokkens_pickles/train.p", "rb" ) )
    no_of_folds = 10
    folds = get_the_folds(train,no_of_folds)
    train_data = join_folds(folds,folds.keys()[:-1])
    validation_data = folds[folds.keys()[-1]]
    mins, maxs = MaxMin(train_data['labels'])
    T_l = MaxMinFit(train_data['labels'], mins, maxs)
    t_l = MaxMinFit(validation_data['labels'], mins, maxs)


    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size2 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'rmsprop', 'adagrad', 'adam'])}}
    out_dim = 5
    activity_l2_0 = {{uniform(0, 1)}}
    activity_l2_1 = {{uniform(0, 1)}}
    activity_l2_2 = {{uniform(0, 1)}}
    l2_0 = {{uniform(0, 1)}}
    l2_1 = {{uniform(0, 1)}}
    l2_2 = {{uniform(0, 1)}}

    model = Sequential()
    model.add(Dense(Dense_size, activation='sigmoid',W_regularizer=l2(l2_0),activity_regularizer=activity_l2(activity_l2_0),input_dim = train_data['skipthoughts'].shape[-1] ))
    model.add(Dense(Dense_size2, activation='sigmoid',W_regularizer=l2(l2_1),activity_regularizer=activity_l2(activity_l2_1)))
    model.add(Dense(out_dim, activation='linear',W_regularizer=l2(l2_2),activity_regularizer=activity_l2(activity_l2_2)))
    model.compile(loss='mse', optimizer=opt)  # 'rmse' is not a built-in Keras loss; mse is the closest built-in

    #model.fit(train_data['skipthoughts'], train_data['labels'], nb_epoch=500, show_accuracy=False, verbose=2)
    #score = model.evaluate( train_data['skipthoughts'], train_data['labels'])

    model.fit(train_data['skipthoughts'], T_l, nb_epoch=500, show_accuracy=False, verbose=2)
    score = model.evaluate( train_data['skipthoughts'], T_l)

    print("score : " +str(score))
    return {'loss': score, 'status': STATUS_OK}
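The {{choice(...)}} and {{uniform(...)}} markers above are hyperas template expressions, expanded at search time rather than valid Python. A sketch of the usual driver call, assuming a hypothetical companion data() function (this example atypically loads its own data inside the model function):

from hyperopt import tpe, Trials
from hyperas import optim

best_run, best_model = optim.minimize(model=keras_model, data=data,
                                      algo=tpe.suggest, max_evals=20,
                                      trials=Trials())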
Example #45
def create_model(args, initial_mean_value, overal_maxlen, vocab):

    ###############################################################################################################################
    ## Recurrence unit type
    #

    if args.recurrent_unit == 'lstm':
        from keras.layers.recurrent import LSTM as RNN
    elif args.recurrent_unit == 'gru':
        from keras.layers.recurrent import GRU as RNN
    elif args.recurrent_unit == 'simple':
        from keras.layers.recurrent import SimpleRNN as RNN

    ###############################################################################################################################
    ## Create Model
    #

    if args.dropout_w > 0:
        dropout_W = args.dropout_w
    else:
        dropout_W = args.dropout_prob  # default=0.5
    if args.dropout_u > 0:
        dropout_U = args.dropout_u
    else:
        dropout_U = args.dropout_prob  # default=0.1

    cnn_border_mode = 'same'

    if args.model_type == 'reg':
        if initial_mean_value.ndim == 0:
            initial_mean_value = np.expand_dims(initial_mean_value, axis=1)
        num_outputs = len(initial_mean_value)
    else:
        num_outputs = initial_mean_value

    ###############################################################################################################################
    ## Initialize embeddings if requested
    #

    if args.emb_path:

        def my_init(shape, name=None):
            from nea.w2vEmbReader import W2VEmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(args.emb_path, emb_dim=args.emb_dim)
            emb_matrix = np.random.random(shape)
            # 			logger.info(' initial matrix \n %s ' % (emb_matrix,))
            emb_matrix = emb_reader.get_emb_matrix_given_vocab(
                vocab, emb_matrix)
            # 			from keras.backend import set_value, get_value
            # 			set_value(model.layers[model.emb_index].W, get_value(emb_reader.get_emb_matrix_given_vocab(vocab, model.layers[model.emb_index].W)))
            # 			model.layers[model.emb_index].W.set_value(emb_reader.get_emb_matrix_given_vocab(vocab, model.layers[model.emb_index].W.get_value()))
            # 			logger.info(' pre-trained matrix \n %s ' % (emb_matrix,))
            return K.variable(emb_matrix, name=name)

        logger.info(' Use pre-trained embedding')
    else:
        my_init = 'uniform'
        logger.info(' Use default initializing embedding')

    ###############################################################################################################################
    ## Model Stacking
    #

    if args.model_type == 'cls':
        logger.info('Building a CLASSIFICATION model with POOLING')
        dense_activation = 'tanh'
        dense_init = 'glorot_normal'
        final_init = 'glorot_uniform'
        if args.loss == 'cnp':
            final_activation = 'softmax'
        elif args.loss == 'hng':
            final_activation = 'linear'
    elif args.model_type == 'reg':
        logger.info('Building a REGRESSION model with POOLING')
        if args.normalize:
            final_activation = 'sigmoid'
            final_init = 'he_normal'
            dense_activation = 'tanh'
            dense_init = 'he_normal'
        else:
            final_activation = 'relu'
            final_init = 'he_uniform'
            dense_activation = 'tanh'
            dense_init = 'he_uniform'
    else:
        raise NotImplementedError

    sequence = Input(shape=(overal_maxlen, ), dtype='int32')
    x = Embedding(len(vocab),
                  args.emb_dim,
                  mask_zero=True,
                  init=my_init,
                  trainable=args.embd_train)(sequence)

    # Conv Layer
    if args.cnn_dim > 0:
        x = Conv1DWithMasking(nb_filter=args.cnn_dim,
                              filter_length=args.cnn_window_size,
                              border_mode=cnn_border_mode,
                              subsample_length=1)(x)

    # RNN Layer
    if args.rnn_dim > 0:
        forwards = RNN(args.rnn_dim,
                       return_sequences=True,
                       dropout_W=dropout_W,
                       dropout_U=dropout_U)(x)
        if args.bi:
            backwards = RNN(args.rnn_dim,
                            return_sequences=True,
                            dropout_W=dropout_W,
                            dropout_U=dropout_U,
                            go_backwards=True)(x)
        if args.dropout_prob > 0:
            forwards = Dropout(args.dropout_prob)(forwards)
            if args.bi:
                backwards = Dropout(args.dropout_prob)(backwards)
        # Stack 2 Layers
        if args.rnn_2l or args.rnn_3l:
            if args.bi:
                merged = merge([forwards, backwards],
                               mode='concat',
                               concat_axis=-1)
            else:
                merged = forwards
            forwards = RNN(args.rnn_dim,
                           return_sequences=True,
                           dropout_W=dropout_W,
                           dropout_U=dropout_U)(merged)
            if args.bi:
                backwards = RNN(args.rnn_dim,
                                return_sequences=True,
                                dropout_W=dropout_W,
                                dropout_U=dropout_U,
                                go_backwards=True)(merged)
            if args.dropout_prob > 0:
                forwards = Dropout(args.dropout_prob)(forwards)
                if args.bi:
                    backwards = Dropout(args.dropout_prob)(backwards)
            # Stack 3 Layers
            if args.rnn_3l:
                if args.bi:
                    merged = merge([forwards, backwards],
                                   mode='concat',
                                   concat_axis=-1)
                else:
                    merged = forwards
                forwards = RNN(args.rnn_dim,
                               return_sequences=True,
                               dropout_W=dropout_W,
                               dropout_U=dropout_U)(merged)
                if args.bi:
                    backwards = RNN(args.rnn_dim,
                                    return_sequences=True,
                                    dropout_W=dropout_W,
                                    dropout_U=dropout_U,
                                    go_backwards=True)(merged)
                if args.dropout_prob > 0:
                    forwards = Dropout(args.dropout_prob)(forwards)
                    if args.bi:
                        backwards = Dropout(args.dropout_prob)(backwards)

        if args.aggregation == 'mot':
            forwards = MeanOverTime(mask_zero=True)(forwards)
            if args.bi:
                backwards = MeanOverTime(mask_zero=True)(backwards)
                merged = merge([forwards, backwards],
                               mode='concat',
                               concat_axis=-1)
            else:
                merged = forwards
        else:
            raise NotImplementedError

        # Augmented TF/IDF Layer
        if args.tfidf > 0:
            pca_input = Input(shape=(args.tfidf, ), dtype='float32')
            tfidfmerged = merge([merged, pca_input], mode='concat')
        else:
            tfidfmerged = merged

        # Optional Dense Layer (hidden layer of args.dense units, matching
        # the non-RNN branch below; the final layer outputs num_outputs)
        if args.dense > 0:
            if args.loss == 'hng':
                tfidfmerged = Dense(
                    args.dense,
                    init=dense_init,
                    W_regularizer=l2(0.001),
                    activity_regularizer=activity_l2(0.001))(tfidfmerged)
            else:
                tfidfmerged = Dense(args.dense, init=dense_init)(tfidfmerged)
            if final_activation == 'relu' or final_activation == 'linear':
                tfidfmerged = BatchNormalization()(tfidfmerged)
            tfidfmerged = Activation(dense_activation)(tfidfmerged)
            if args.dropout_prob > 0:
                tfidfmerged = Dropout(args.dropout_prob)(tfidfmerged)

        # Final Prediction Layer
        if args.loss == 'hng':
            tfidfmerged = Dense(
                num_outputs,
                init=final_init,
                W_regularizer=l2(0.001),
                activity_regularizer=activity_l2(0.001))(tfidfmerged)
        else:
            tfidfmerged = Dense(num_outputs, init=final_init)(tfidfmerged)
        if final_activation == 'relu' or final_activation == 'linear':
            tfidfmerged = BatchNormalization()(tfidfmerged)
        predictions = Activation(final_activation)(tfidfmerged)

    else:  # if no rnn
        if args.dropout_prob > 0:
            x = Dropout(args.dropout_prob)(x)
        # Mean over Time
        if args.aggregation == 'mot':
            x = MeanOverTime(mask_zero=True)(x)
        else:
            raise NotImplementedError
        # Augmented TF/IDF Layer
        if args.tfidf > 0:
            pca_input = Input(shape=(args.tfidf, ), dtype='float32')
            z = merge([x, pca_input], mode='concat')
        else:
            z = x
        # Optional Dense Layer
        if args.dense > 0:
            if args.loss == 'hng':
                z = Dense(args.dense,
                          init=dense_init,
                          W_regularizer=l2(0.001),
                          activity_regularizer=activity_l2(0.001))(z)
            else:
                z = Dense(args.dense, init=dense_init)(z)
            if final_activation == 'relu' or final_activation == 'linear':
                z = BatchNormalization()(z)
            z = Activation(dense_activation)(z)
            if args.dropout_prob > 0:
                z = Dropout(args.dropout_prob)(z)
        # Final Prediction Layer
        if args.loss == 'hng':
            z = Dense(num_outputs,
                      init=final_init,
                      W_regularizer=l2(0.001),
                      activity_regularizer=activity_l2(0.001))(z)
        else:
            z = Dense(num_outputs, init=final_init)(z)
        if final_activation == 'relu' or final_activation == 'linear':
            z = BatchNormalization()(z)
        predictions = Activation(final_activation)(z)

    # Model Input/Output
    if args.tfidf > 0:
        model = Model(input=[sequence, pca_input], output=predictions)
    else:
        model = Model(input=sequence, output=predictions)

# 	if args.model_type == 'cls':
# 		logger.info('Building a CLASSIFICATION model')
# 		sequence = Input(shape=(overal_maxlen,), dtype='int32')
# 		x = Embedding(len(vocab), args.emb_dim, mask_zero=True, init=my_init, trainable=args.embd_train)(sequence)
# 		if args.cnn_dim > 0:
# 			x = Conv1DWithMasking(nb_filter=args.cnn_dim, filter_length=args.cnn_window_size, border_mode=cnn_border_mode, subsample_length=1)(x)
# 		if args.rnn_dim > 0:
# 			x = RNN(args.rnn_dim, return_sequences=False, dropout_W=dropout_W, dropout_U=dropout_U)(x)
# 		predictions = Dense(num_outputs, activation='softmax')(x)
# 		model = Model(input=sequence, output=predictions)

# 	elif args.model_type == 'clsp':

# 	elif args.model_type == 'mlp':
# 		logger.info('Building a linear model with POOLING')
# 		sequence = Input(shape=(overal_maxlen,), dtype='int32')
# 		x = Embedding(len(vocab), args.emb_dim, mask_zero=True, init=my_init, trainable=args.embd_train)(sequence)
# 		if args.dropout_prob > 0:
# 			x = Dropout(args.dropout_prob)(x)
# 		x = MeanOverTime(mask_zero=True)(x)
# 		if args.tfidf > 0:
# 			z = merge([x,pca_input], mode='concat')
# 		else:
# 			z = x
# 		if args.dense > 0:
# 			z = Dense(args.dense, activation='tanh')(z)
# 			if args.dropout_prob > 0:
# 				z = Dropout(args.dropout_prob)(z)
# 		predictions = Dense(num_outputs, activation='softmax')(z)
# 		if args.tfidf > 0:
# 			model = Model(input=[sequence, pca_input], output=predictions)
# 		else:
# 			model = Model(input=sequence, output=predictions)
#
# 	elif args.model_type == 'reg':
# 		logger.info('Building a REGRESSION model')
# 		model = Sequential()
# 		model.add(Embedding(len(vocab), args.emb_dim, mask_zero=True, init=my_init, trainable=args.embd_train))
# 		if args.cnn_dim > 0:
# 			model.add(Conv1DWithMasking(nb_filter=args.cnn_dim, filter_length=args.cnn_window_size, border_mode=cnn_border_mode, subsample_length=1))
# 		if args.rnn_dim > 0:
# 			model.add(RNN(args.rnn_dim, return_sequences=False, dropout_W=dropout_W, dropout_U=dropout_U))
# 		if args.dropout_prob > 0:
# 			model.add(Dropout(args.dropout_prob))
# 		model.add(Dense(num_outputs))
# 		if not args.skip_init_bias:
# 			bias_value = (np.log(initial_mean_value) - np.log(1 - initial_mean_value)).astype(K.floatx())
# 			model.layers[-1].b.set_value(bias_value)
# 		model.add(Activation('sigmoid'))
#
# 	elif args.model_type == 'regp':
# 		logger.info('Building a REGRESSION model with POOLING')
# 		model = Sequential()
# 		model.add(Embedding(len(vocab), args.emb_dim, mask_zero=True, init=my_init, trainable=args.embd_train))
# 		if args.cnn_dim > 0:
# 			model.add(Conv1DWithMasking(nb_filter=args.cnn_dim, filter_length=args.cnn_window_size, border_mode=cnn_border_mode, subsample_length=1))
# 		if args.rnn_dim > 0:
# 			model.add(RNN(args.rnn_dim, return_sequences=True, dropout_W=dropout_W, dropout_U=dropout_U))
# 		if args.dropout_prob > 0:
# 			model.add(Dropout(args.dropout_prob))
# 		if args.aggregation == 'mot':
# 			model.add(MeanOverTime(mask_zero=True))
# 		elif args.aggregation.startswith('att'):
# 			model.add(Attention(op=args.aggregation, activation='tanh', init_stdev=0.01))
# 		model.add(Dense(num_outputs))
# 		if not args.skip_init_bias:
# 			bias_value = (np.log(initial_mean_value) - np.log(1 - initial_mean_value)).astype(K.floatx())
# # 			model.layers[-1].b.set_value(bias_value)
# 			K.set_value(model.layers[-1].b, bias_value)
# 		model.add(Activation('sigmoid'))
#
# 	elif args.model_type == 'breg':
# 		logger.info('Building a BIDIRECTIONAL REGRESSION model')
# 		sequence = Input(shape=(overal_maxlen,), dtype='int32')
# 		output = Embedding(len(vocab), args.emb_dim, mask_zero=True, init=my_init, trainable=args.embd_train)(sequence)
# 		if args.cnn_dim > 0:
# 			output = Conv1DWithMasking(nb_filter=args.cnn_dim, filter_length=args.cnn_window_size, border_mode=cnn_border_mode, subsample_length=1)(output)
# 		if args.rnn_dim > 0:
# 			forwards = RNN(args.rnn_dim, return_sequences=False, dropout_W=dropout_W, dropout_U=dropout_U)(output)
# 			backwards = RNN(args.rnn_dim, return_sequences=False, dropout_W=dropout_W, dropout_U=dropout_U, go_backwards=True)(output)
# 		if args.dropout_prob > 0:
# 			forwards = Dropout(args.dropout_prob)(forwards)
# 			backwards = Dropout(args.dropout_prob)(backwards)
# 		merged = merge([forwards, backwards], mode='concat', concat_axis=-1)
# 		densed = Dense(num_outputs)(merged)
# 		if not args.skip_init_bias:
# 			raise NotImplementedError
# 		score = Activation('sigmoid')(densed)
# 		model = Model(input=sequence, output=score)
#
# 	elif args.model_type == 'bregp':
# 		logger.info('Building a BIDIRECTIONAL REGRESSION model with POOLING')
# 		sequence = Input(shape=(overal_maxlen,), dtype='int32')
# 		output = Embedding(len(vocab), args.emb_dim, mask_zero=True, init=my_init, trainable=args.embd_train)(sequence)
# 		if args.cnn_dim > 0:
# 			output = Conv1DWithMasking(nb_filter=args.cnn_dim, filter_length=args.cnn_window_size, border_mode=cnn_border_mode, subsample_length=1)(output)
# 		if args.rnn_dim > 0:
# 			forwards = RNN(args.rnn_dim, return_sequences=True, dropout_W=dropout_W, dropout_U=dropout_U)(output)
# 			backwards = RNN(args.rnn_dim, return_sequences=True, dropout_W=dropout_W, dropout_U=dropout_U, go_backwards=True)(output)
# 		if args.dropout_prob > 0:
# 			forwards = Dropout(args.dropout_prob)(forwards)
# 			backwards = Dropout(args.dropout_prob)(backwards)
# 		forwards_mean = MeanOverTime(mask_zero=True)(forwards)
# 		backwards_mean = MeanOverTime(mask_zero=True)(backwards)
# 		merged = merge([forwards_mean, backwards_mean], mode='concat', concat_axis=-1)
# 		densed = Dense(num_outputs)(merged)
# 		if not args.skip_init_bias:
# 			raise NotImplementedError
# 		score = Activation('sigmoid')(densed)
# 		model = Model(input=sequence, output=score)

    logger.info('  Model Done')
    return model
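A hedged usage sketch for create_model(); the Namespace attributes mirror the fields the function reads, and the toy vocab and hyperparameter values are purely illustrative:

from argparse import Namespace
import numpy as np

vocab = {'<pad>': 0, '<unk>': 1, 'the': 2, 'cat': 3}  # toy vocabulary (assumed)
args = Namespace(
    recurrent_unit='lstm', model_type='reg', loss='mse', normalize=True,
    emb_path=None, emb_dim=50, embd_train=True,
    cnn_dim=0, cnn_window_size=3, rnn_dim=300, bi=False,
    rnn_2l=False, rnn_3l=False, aggregation='mot',
    dropout_w=0.5, dropout_u=0.1, dropout_prob=0.5,
    tfidf=0, dense=0)
model = create_model(args, np.array(0.5), overal_maxlen=500, vocab=vocab)
model.compile(loss='mse', optimizer='rmsprop')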
print('dense:', dense_len)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 67, input_length=maxlen))
model.add(GRU(67))  # try using a GRU instead, for fun
model.add(Dropout(0.5))
model.add(Dense(dense_len, input_dim=maxlen, W_regularizer=l2(0.02), activity_regularizer=activity_l2(0.02)))
model.add(Activation('softmax'))

# try using different optimizers and different optimizer configs
model.compile(loss='categorical_crossentropy',
              optimizer='adam')

print("Train...")
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=10,
          validation_data=(X_test, y_test), show_accuracy=True)
score, acc = model.evaluate(X_test, y_test,
                            batch_size=batch_size,
                            show_accuracy=True)
print('Test score:', score)
print('Test accuracy:', acc)
Ejemplo n.º 47
    nb_filter=nb_filter,
    filter_length=5,  # window size 5
    border_mode='valid',
    activation='relu',
    name='conv_window_5')(X_sentence)
pool_output_5 = Lambda(max_1d, output_shape=(nb_filter, ))(cnn_layer_5)

pool_output = merge(
    [pool_output_2, pool_output_3, pool_output_4, pool_output_5],
    mode='concat',
    name='pool_output')
X_dropout = Dropout(0.5)(pool_output)
X_output = Dense(nb_classes,
                 W_constraint=maxnorm(3),
                 W_regularizer=l2(0.01),
                 activity_regularizer=activity_l2(0.01),
                 activation='softmax')(X_dropout)

model = Model(input=[input_sentence, input_position, input_tag],
              output=[X_output])
model.compile(optimizer='adamax',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
#model.summary()
#exit()
print('Train...')
model_path = './model/best_model.hdf5'
modelcheckpoint = ModelCheckpoint(model_path, verbose=1, save_best_only=True)
model.fit([train_sentence, train_position, train_tag], [train_label],
          validation_data=([dev_sentence, dev_position, dev_tag], [dev_label]),
          callbacks=[modelcheckpoint],
Ejemplo n.º 48
def get_untrained_model(encoder_dropout=0, decoder_dropout=0, input_dropout=0, reg_W=0, reg_B=0, reg_act=0, LSTM_size=32, dense_size=100, maxpooling=True, data_dim=300, max_len=22, nb_classes=7):
    '''
    Creates a neural network with the specified conditions.
    params:
        encoder_dropout: dropout rate for LSTM encoders (NOT dropout for LSTM internal gates)
        decoder_dropout: dropout rate for decoder
        input_dropout: dropout rate applied to the raw inputs
        reg_W: lambda value for weight regularization
        reg_B: lambda value for bias regularization
        reg_act: lambda value for activation regularization
        LSTM_size: number of units in the LSTM layers
        dense_size: number of units in the decoder's hidden Dense layer
        maxpooling: pool over LSTM output at each timestep, or just take the output from the final LSTM timestep
        data_dim: dimension of the input data
        max_len: maximum length of an input sequence (this should be found based on the training data)
        nb_classes: number of classes present in the training data
    '''

    # create regularization objects if needed
    if reg_W != 0:
        W_reg = l2(reg_W)
    else:
        W_reg = None

    if reg_B != 0:
        B_reg = l2(reg_B)
    else:
        B_reg = None

    if reg_act != 0:
        act_reg = activity_l2(reg_act)
    else:
        act_reg = None

    # encode the first entity
    encoder_L = Sequential()

    encoder_L.add(Dropout(input_dropout, input_shape=(data_dim, max_len)))

    # with maxpooling
    if maxpooling:
        encoder_L.add(LSTM(LSTM_size, return_sequences=True, inner_activation="sigmoid"))
        if encoder_dropout != 0:
            encoder_L.add(TimeDistributed(Dropout(encoder_dropout)))
        encoder_L.add(MaxPooling1D(pool_length=LSTM_size))
        encoder_L.add(Flatten())

    # without maxpooling
    else:
        encoder_L.add(Masking(mask_value=0.))
        encoder_L.add(LSTM(LSTM_size, return_sequences=False, inner_activation="sigmoid"))
        if encoder_dropout != 0:
            encoder_L.add(Dropout(encoder_dropout))

    # encode the second entity
    encoder_R = Sequential()

    encoder_R.add(Dropout(input_dropout, input_shape=(data_dim, max_len)))

    # with maxpooling
    if maxpooling:
        encoder_R.add(LSTM(LSTM_size, return_sequences=True, inner_activation="sigmoid"))
        if encoder_dropout != 0:
            encoder_R.add(TimeDistributed(Dropout(encoder_dropout)))
        encoder_R.add(MaxPooling1D(pool_length=LSTM_size))
        encoder_R.add(Flatten())

    # without maxpooling
    else:
        encoder_R.add(Masking(mask_value=0.))
        encoder_R.add(LSTM(LSTM_size, return_sequences=False, inner_activation="sigmoid"))
        if encoder_dropout != 0:
            encoder_R.add(Dropout(encoder_dropout))

    # combine and classify entities as a single relation
    decoder = Sequential()
    decoder.add(Merge([encoder_R, encoder_L], mode='concat'))
    decoder.add(Dense(dense_size, W_regularizer=W_reg, b_regularizer=B_reg, activity_regularizer=act_reg, activation='sigmoid'))
    if decoder_dropout != 0:
        decoder.add(Dropout(decoder_dropout))
    decoder.add(Dense(nb_classes, W_regularizer=W_reg, b_regularizer=B_reg, activity_regularizer=act_reg, activation='softmax'))

    # compile the final model
    decoder.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return decoder
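A hedged usage sketch for the builder above; the shapes follow the (data_dim, max_len) input convention from the docstring, and the fit call is commented because the two merged encoders expect a list of inputs:

model = get_untrained_model(encoder_dropout=0.2, decoder_dropout=0.5,
                            input_dropout=0.1, reg_W=1e-4, reg_B=1e-4,
                            reg_act=1e-4, LSTM_size=64, dense_size=100,
                            maxpooling=True, data_dim=300, max_len=22,
                            nb_classes=7)
# model.fit([X_R, X_L], y, nb_epoch=10, batch_size=32)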
              float(2 * np.sqrt(np.array(history.losses[-1]))))


epochs = 100
learning_rate = 0.06
decay_rate = 5e-6
momentum = 0.9
reg = 0.0002

model = Sequential()
model.add(
    Dense(4,
          input_dim=3,
          init='uniform',
          W_regularizer=l2(reg),
          activity_regularizer=activity_l2(reg)))
model.add(
    Dense(1,
          init='uniform',
          W_regularizer=l2(reg),
          activity_regularizer=activity_l2(reg)))
sgd = SGD(lr=learning_rate,
          momentum=momentum,
          decay=decay_rate,
          nesterov=False)
model.compile(loss='mean_squared_error',
              optimizer=sgd,
              metrics=['mean_absolute_error'])


def step_decay(losses):
Ejemplo n.º 50
train_features = train_arch['features']
train_labels = train_arch['labels']
test_features = test_arch['features']
ids = test_arch['ids']

#assert len(ids) == len(test_features)
print "loaded data"
res = np.zeros((len(ids), 2))

num_neurons = 30
model = Sequential()
model.add(
    Dense(num_neurons,
          input_dim=train_features.shape[1],
          W_regularizer=l2(.01),
          activity_regularizer=activity_l2(.01)))
model.add(Activation('sigmoid'))

for i in xrange(3):
    model.add(
        Dense(num_neurons,
              input_dim=num_neurons,
              W_regularizer=l2(.01),
              activity_regularizer=activity_l2(.01)))
    # model.add(Dropout(.5))
    model.add(Activation('sigmoid'))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(optimizer='sgd', loss='binary_crossentropy')
Ejemplo n.º 51
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

model.add(
    Dense(1024,
          W_regularizer=l2(0.001),
          activity_regularizer=activity_l2(0.001)))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(
    Dense(1024,
          W_regularizer=l2(0.001),
          activity_regularizer=activity_l2(0.001)))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(4))
model.add(Activation('softmax'))

sgd = keras.optimizers.SGD(lr=0.0625, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
def keras_model():
    from keras.models import Sequential, Graph
    from keras.layers.embeddings import Embedding
    from keras.layers.convolutional import Convolution2D, MaxPooling2D
    from keras.layers.core import Dense, Reshape, Activation, Flatten, Dropout
    from keras.regularizers import l1, activity_l1, l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    import pickle
    embeddings = pickle.load(open("/data/dpappas/personality/emb.p", "rb"))
    train = pickle.load(open("/data/dpappas/personality/train.p", "rb"))
    no_of_folds = 10
    folds = get_the_folds(train, no_of_folds)
    train_data = join_folds(folds, folds.keys()[:-1])
    validation_data = folds[folds.keys()[-1]]
    max_input_length = validation_data['features'].shape[1]
    CNN_filters = {{choice([5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195,200])}}
    CNN_rows = {{choice([1,2,3,4,5,6,7,8,9,10])}}
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size2 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size3 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'adam'])}}
    is_trainable = {{choice([True, False])}}
    D = embeddings.shape[-1]
    cols = D
    out_dim = train_data['labels'].shape[-1]
    graph = Graph()
    graph.add_input(name='txt_data', input_shape=[train_data['features'].shape[-1]], dtype='int')
    graph.add_node(Embedding(input_dim=embeddings.shape[0], output_dim=D, weights=[embeddings], trainable=is_trainable, input_length=max_input_length), name='Emb', input='txt_data')
    graph.add_node(Reshape((1, max_input_length, D)), name='Reshape', input='Emb')
    graph.add_node(Convolution2D(CNN_filters, CNN_rows, cols, activation='sigmoid'), name='Conv', input='Reshape')
    sh = graph.nodes['Conv'].output_shape
    graph.add_node(MaxPooling2D(pool_size=(sh[-2], sh[-1])), name='MaxPool', input='Conv')
    graph.add_node(Flatten(), name='Flat', input='MaxPool')
    graph.add_node(Dense(Dense_size, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})), name='Dtxt', input='Flat')
    graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout1', input='Dtxt')
    graph.add_input(name='av_data', input_shape=[train_data['AV'].shape[-1]])
    graph.add_node(Dense(Dense_size2, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})), name='Dav', input='av_data')
    graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout2', input='Dav')
    graph.add_node(Dense(Dense_size3, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})), name='Dense1', inputs=['Dropout2', 'Dropout1'], merge_mode='concat')
    graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout3', input='Dense1')
    graph.add_node(Dense(out_dim, activation='linear'), name='Dense2', input='Dropout3')
    graph.add_output(name='output', input='Dense2')
    graph.compile(optimizer=opt, loss={'output':'rmse'})
    graph.fit(
        {
            'txt_data':train_data['features'],
            'av_data':train_data['AV'],
            'output':train_data['labels']
        },
        nb_epoch=500,
        batch_size=64
    )
    scores = graph.evaluate({'txt_data':validation_data['features'], 'av_data':validation_data['AV'], 'output':validation_data['labels']})
    print(scores)
    return {'loss': scores, 'status': STATUS_OK}
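The {{choice(...)}} and {{uniform(...)}} markers are hyperas template fields. A hedged sketch of how such a template is typically driven: hyperas expands the markers and hands the result to hyperopt (the data function is a stub here because keras_model() above loads its own data from disk):

from hyperopt import tpe, Trials
from hyperas import optim

def data():
    # stub: the template above reads its inputs inside the function
    return tuple()

best_run, best_model = optim.minimize(model=keras_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=20,
                                      trials=Trials())
print(best_run)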
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 67, input_length=maxlen))
model.add(GRU(67))  # try using a GRU instead, for fun
model.add(Dropout(0.5))
model.add(
    Dense(dense_len,
          input_dim=maxlen,
          W_regularizer=l2(0.02),
          activity_regularizer=activity_l2(0.02)))
model.add(Activation('softmax'))

# try using different optimizers and different optimizer configs
model.compile(loss='categorical_crossentropy', optimizer='adam')

print("Train...")
model.fit(X_train,
          y_train,
          batch_size=batch_size,
          nb_epoch=10,
          validation_data=(X_test, y_test),
          show_accuracy=True)
score, acc = model.evaluate(X_test,
                            y_test,
                            batch_size=batch_size,
Ejemplo n.º 54
def predictAllShop_ANN2_together(all_data, trainAsTest=False, saveFilePath=None, featurePath=None):
    """
    Train on all data from all merchants, then predict every shop.
    :param all_data: per-shop daily count data for every shop
    :param trainAsTest: whether to use the last 14 days of the training set as the test set
    :param saveFilePath: optional path prefix for writing the predictions to CSV
    :param featurePath: optional path to a precomputed feature file
    :return: the predictions for all shops
    """
    shop_need_to_predict = 2000
    h1_activation = "relu"
    rnn_epoch = 20
    verbose = 2
    h_unit = 16
    h2_unit = 8 
    batch_size = 5
    shop_info = pd.read_csv(Parameter.shopinfopath, names=["shopid","cityname","locationid","perpay","score","comment","level","cate1","cate2","cate3"])

    sameday_backNum = 7
    day_back_num = 7
    week_backnum = 3
    other_features = [statistic_functon_mean,statistic_functon_median]
    other_features = []
    '''one-hot encode cate1'''
    cate = shop_info['cate1'].tolist()
    cate_dup = set(cate)
    cates = []
    for i in range(len(cate_dup)):
        cates.append([i])
    hot_encoder = OneHotEncoder().fit(cates)
    dicts = dict(zip(cate_dup, range(len(cate_dup))))
    cate_num = []
    for c in cate:
        cate_num.append([dicts[c]])
    '''cate1 one-hot encoding done'''
    if featurePath is None:

        all_x = None
        all_y = None
        for shopid in range(1, 1 + shop_need_to_predict, 1):
            print "get " , shopid, " train"
            part_data = all_data[all_data.shopid == shopid]
            last_14_real_y = None
            # 取出一部分做训练集
            if trainAsTest: #使用训练集后14天作为测试集的话,训练集为前面部分
                last_14_real_y = part_data[len(part_data) - 14:]["count"].values
                part_data = part_data[0:len(part_data) - 14]
            # print last_14_real_y
            skipNum = part_data.shape[0] - 168
            if skipNum < 0:
                skipNum = 0

            sameday = extractBackSameday(part_data, sameday_backNum, skipNum, nan_method_sameday_mean)
            day = extractBackDay(part_data,day_back_num,skipNum,nan_method_sameday_mean)
            count = extractCount(part_data, skipNum)
            train_x = getOneWeekdayFomExtractedData(sameday)
            train_x = np.concatenate((train_x,getOneWeekdayFomExtractedData(day)),axis=1)
            train_y = getOneWeekdayFomExtractedData(count)
            for feature in other_features:
                value = getOneWeekdayFomExtractedData(extractBackWeekValue(part_data, week_backnum, skipNum, nan_method_sameday_mean, feature))
                train_x = np.append(train_x, value, axis=1)

            '''append shop information'''
            # print train_x,train_x.shape
            index = shopid - 1
            oneshopinfo = shop_info.ix[index]
            shop_perpay = oneshopinfo['perpay'] if not pd.isnull(oneshopinfo['perpay']) else 0
            shop_score = oneshopinfo['score'] if not pd.isnull(oneshopinfo['score']) else 0
            shop_comment = oneshopinfo['comment'] if not pd.isnull(oneshopinfo['comment']) else 0
            shop_level = oneshopinfo['level'] if not pd.isnull(oneshopinfo['level']) else 0
            shop_cate1 = oneshopinfo['cate1']
            import warnings
            with warnings.catch_warnings():
                warnings.simplefilter("ignore",category=DeprecationWarning)
                shop_cate1_encoder = hot_encoder.transform([dicts[shop_cate1]]).toarray()
            train_x = np.insert(train_x,train_x.shape[1],shop_perpay,axis=1)
            train_x = np.insert(train_x,train_x.shape[1],shop_score,axis=1)
            train_x = np.insert(train_x,train_x.shape[1],shop_comment,axis=1)
            train_x = np.insert(train_x,train_x.shape[1],shop_level,axis=1)
            for i in range(shop_cate1_encoder.shape[1]):
                train_x = np.insert(train_x,train_x.shape[1],shop_cate1_encoder[0][i],axis=1)
            '''shop information appended'''

            if all_x is None:
                all_x = train_x
                all_y = train_y
            else:
                all_x = np.insert(all_x,all_x.shape[0],train_x,axis=0)
                all_y = np.insert(all_y,all_y.shape[0],train_y,axis=0)

                # '''append day-of-week feature'''
                # extract_weekday = getOneWeekdayFomExtractedData(extractWeekday(part_data, skipNum))
                # train_x = np.append(train_x, extract_weekday, axis=1)
                # ''''''

                # train_x = train_x.reshape((train_x.shape[0],
                #                            train_x.shape[1], 1))
                # print model.get_weights()
                # part_counts = []
                # for i in range(7):
                #     weekday = i + 1
                #     part_count = getOneWeekdayFomExtractedData(count, weekday)
                #     part_counts.append(part_count)


        train_x = all_x
        train_y = all_y
        featureAndLabel = np.concatenate((train_x,train_y),axis=1)
        flDF = pd.DataFrame(featureAndLabel, columns=["sameday1","sameday2","sameday3","sameday4","sameday5","sameday6","sameday7","day1","day2","day3","day4","day5","day6","day7","perpay","score","comment","level","cate1_1","cate1_2","cate1_3","cate1_4","cate1_5","cate1_6","label"])
        if trainAsTest:
            flDF.to_csv("train_feature/ann1_168.csv")
        else:
            flDF.to_csv("feature/ann1.csv")
    else:  # a featurePath file was supplied
        flDF = pd.read_csv(featurePath,index_col=0)
        train_x = flDF.values[:,:-1]
        train_y = flDF.values[:,-1:]
        # print train_x
        # print train_y

    '''normalize the features and labels'''
    x_scaler = MinMaxScaler().fit(train_x)
    y_scaler = MinMaxScaler().fit(train_y)
    train_x = x_scaler.transform(train_x)
    train_y = y_scaler.transform(train_y)
    '''normalization done'''


    '''build the neural network'''
    model = Sequential()
    model.add(Dense(h_unit, input_dim=train_x.shape[1], activation=h1_activation)) #sigmoid
    model.add(Dense(h2_unit, activation=h1_activation))
    model.add(Dense(1, activation='linear',activity_regularizer=activity_l2(0.01)))
    sgd = SGD(0.01)
    model.compile(loss="mse", optimizer=sgd)
    # print model.summary()
    # print getrefcount(model)
    # print model.summary()
    model.fit(train_x, train_y, nb_epoch=rnn_epoch, batch_size=batch_size, verbose=verbose)
    ''''''

    format = "%Y-%m-%d"
    if trainAsTest:
        startTime = datetime.datetime.strptime("2016-10-18", format)
    else:
        startTime = datetime.datetime.strptime("2016-11-1", format)
    timedelta = datetime.timedelta(1)


    '''predict every shop'''
    preficts_all = None
    real_all = None
    for j in range(1, 1 + shop_need_to_predict, 1):
        print "predict:", j
        preficts = []
        part_data = all_data[all_data.shopid == j]
        last_14_real_y = None

        if trainAsTest:  # when the last 14 days serve as the test set, train on the earlier part
            last_14_real_y = part_data[len(part_data) - 14:]["count"].values
            part_data = part_data[0:len(part_data) - 14]

        '''predict 14 days'''
        for i in range(14):
            currentTime = startTime + timedelta * i
            strftime = currentTime.strftime(format)
            # index = getWeekday(strftime) - 1
            # part_count = part_counts[index]
            # use the same weekday from the previous {sameday_backNum} weeks as prediction features
            part_data = part_data.append({"count":0,"shopid":j,"time":strftime,"weekday":getWeekday(strftime)},ignore_index=True)
            x = getOneWeekdayFomExtractedData(extractBackSameday(part_data,sameday_backNum,part_data.shape[0] - 1, nan_method_sameday_mean))
            x = np.concatenate((x,getOneWeekdayFomExtractedData(extractBackDay(part_data,day_back_num,part_data.shape[0]-1,nan_method_sameday_mean))),axis=1)
            for feature in other_features:
                x_value = getOneWeekdayFomExtractedData(extractBackWeekValue(part_data, week_backnum, part_data.shape[0]-1, nan_method_sameday_mean, feature))
                x = np.append(x, x_value, axis=1)
            # '''append day-of-week feature'''
            # x = np.append(x, getOneWeekdayFomExtractedData(extractWeekday(part_data, part_data.shape[0]-1)), axis=1)
            # ''''''
            '''append shop information'''
            index = j - 1
            oneshopinfo = shop_info.ix[index]
            shop_perpay = oneshopinfo['perpay'] if not pd.isnull(oneshopinfo['perpay']) else 0
            shop_score = oneshopinfo['score'] if not pd.isnull(oneshopinfo['score']) else 0
            shop_comment = oneshopinfo['comment'] if not pd.isnull(oneshopinfo['comment']) else 0
            shop_level = oneshopinfo['level'] if not pd.isnull(oneshopinfo['level']) else 0
            shop_cate1 = oneshopinfo['cate1']
            import warnings
            with warnings.catch_warnings():
                warnings.simplefilter("ignore",category=DeprecationWarning)
                shop_cate1_encoder = hot_encoder.transform([dicts[shop_cate1]]).toarray()
            x = np.insert(x,x.shape[1],shop_perpay,axis=1)
            x = np.insert(x,x.shape[1],shop_score,axis=1)
            x = np.insert(x,x.shape[1],shop_comment,axis=1)
            x = np.insert(x,x.shape[1],shop_level,axis=1)
            for i in range(shop_cate1_encoder.shape[1]):
                x = np.insert(x,x.shape[1],shop_cate1_encoder[0][i],axis=1)
            '''shop information appended'''

            x = x_scaler.transform(x)
            # for j in range(sameday_backNum):
            #     x.append(train_y[len(train_y) - (j+1)*7][0])
            # x = np.array(x).reshape((1, sameday_backNum))

            # print x
            # x = x.reshape(1, sameday_backNum, 1)
            predict = model.predict(x)
            if predict.ndim == 2:
                predict = y_scaler.inverse_transform(predict)[0][0]
            elif predict.ndim == 1:
                predict = y_scaler.inverse_transform(predict)[0]

            if predict <= 0:
                predict = 1  # floor non-positive predictions at 1
            preficts.append(predict)
            part_data.set_value(part_data.shape[0]-1, "count", predict)

        preficts = (removeNegetive(toInt(np.array(preficts)))).astype(int)
        if preficts_all is None:
            preficts_all = preficts
        else:
            preficts_all = np.insert(preficts_all,preficts_all.shape[0],preficts,axis=0)

        if trainAsTest:
            last_14_real_y = (removeNegetive(toInt(np.array(last_14_real_y)))).astype(int)
            if real_all is None:
                real_all = last_14_real_y
            else:
                real_all = np.insert(real_all,real_all.shape[0],last_14_real_y,axis=0)
                # print preficts,last_14_real_y
            print str(j)+',score:', scoreoneshop(preficts, last_14_real_y)

    # preficts = np.array(preficts)
    preficts_all = preficts_all.reshape((shop_need_to_predict,14))
    if trainAsTest:
        real_all = real_all.reshape((shop_need_to_predict,14))
        preficts_all = np.concatenate((preficts_all,real_all), axis=1)
    preficts_all = np.insert(preficts_all, 0, range(1, shop_need_to_predict+1, 1), axis=1)
    if saveFilePath is not None:
        np.savetxt(saveFilePath+ ("_%d_%d_%s_%d_%s.csv" % (rnn_epoch,h_unit,h1_activation,h2_unit,h1_activation)),preficts_all,fmt="%d",delimiter=",")
    return preficts_all
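A hedged call sketch; all_data is assumed to be the per-shop daily count DataFrame the function indexes by shopid, time, and count:

result = predictAllShop_ANN2_together(all_data,
                                      trainAsTest=True,
                                      saveFilePath="result/ann2",
                                      featurePath=None)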
Ejemplo n.º 55
def define_model(input, p):
    """
    Define the Neural Network we are going to use for predictions
    :param input: feature vectors
    :return: the neural net object
    """
    a = 0.1
    model = Sequential([
        # We randomly drop 20% of the input
        # nodes to avoid over fitting
        Dropout(p, input_shape=(len(input[0]), )),

        # Normalize inputs
        BatchNormalization(epsilon=0.001,
                           mode=0,
                           axis=-1,
                           momentum=0.99,
                           weights=None,
                           beta_init='zero',
                           gamma_init='one',
                           gamma_regularizer=None,
                           beta_regularizer=None),

        # Creates the model structure
        Dense(
            512,
            W_regularizer=l2(0.01),
            activity_regularizer=activity_l2(0.01),
        ),
        LeakyReLU(alpha=a),
        Dense(
            256,
            W_regularizer=l2(0.01),
            activity_regularizer=activity_l2(0.01),
        ),
        LeakyReLU(alpha=a),
        Dense(
            128,
            W_regularizer=l2(0.01),
            activity_regularizer=activity_l2(0.01),
        ),
        LeakyReLU(alpha=a),
        Dense(64),
        LeakyReLU(alpha=a),
        Dense(32),
        LeakyReLU(alpha=a),
        Dense(32),
        LeakyReLU(alpha=a),
        Dense(32),
        LeakyReLU(alpha=a),
        Dense(2),
        Activation("softmax"),
    ])

    # Train the model
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=0.001,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08,
                                 decay=0.0),
                  metrics=['accuracy'])
    return model
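A hedged usage sketch for define_model(); X and y are hypothetical feature vectors and binary labels, with y one-hot encoded to match the 2-way softmax head:

import numpy as np
from keras.utils import np_utils

X = np.random.rand(100, 40)  # 100 samples, 40 features (assumed)
y = np_utils.to_categorical(np.random.randint(2, size=100), 2)
model = define_model(X, p=0.2)
model.fit(X, y, nb_epoch=5, batch_size=16)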
def build_model(self):
    lstm_branch = []
    input_branch = []
    for i in range(self.layers_number):
        main_input = Input(shape=(self.lstm_timesteps, self.input_dim), name="main_input_" + str(i))
        input_branch.append(main_input)
        lstm_out = LSTM(self.lstm_hidden, return_sequences=True)(main_input)
        # auxiliary_input = Input(batch_shape=(self.batch_size,1,self.lstm_timesteps), name='auxiliary_input'+str(i))
        auxiliary_input = Input(shape=(1, self.lstm_timesteps), name="auxiliary_input" + str(i))
        input_branch.append(auxiliary_input)
        """
        x1 = Merge([lstm_out, auxiliary_input], mode=lambda x, y: (x*y).sum(axis=0),
                name='merge_lstm_auxi'+str(i))
        """
        x1 = merge([auxiliary_input, lstm_out], mode="dot", dot_axes=[2, 1], name="merge_lstm_auxi" + str(i))
        assert x1
        flatten = Reshape((self.lstm_hidden,))(x1)
        c_input = Input(shape=(6,), name="c_input" + str(i))
        input_branch.append(c_input)
        x2 = merge([flatten, c_input], mode="concat")
        x2 = Dense(
            self.lstm_hidden, activation="relu", W_regularizer=l2(0.001), activity_regularizer=activity_l2(0.001)
        )(x2)
        assert x2
        lstm_branch.append(x2)
    lstm_all_out = merge(lstm_branch, mode="sum", name="lstm_all_out")
    """
    dense_relu = Dense(self.lstm_hidden, activation='relu', W_regularizer=l2(0.001),
            activity_regularizer=activity_l2(0.001))(lstm_all_out)
    """
    final_loss = Dense(self.output_dim, name="main_output")(lstm_all_out)
    self.model = Model(input_branch, output=final_loss)
    self.model.compile(loss="mean_squared_error", optimizer="adagrad")
    plot(self.model, to_file="multiple_model.png", show_shapes=True)
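The mode="dot" merge above contracts the (1, timesteps) auxiliary weights against the (timesteps, hidden) LSTM outputs. A minimal numpy sketch of the per-sample computation, with assumed sizes:

import numpy as np

T, H = 10, 32                   # lstm_timesteps, lstm_hidden (assumed)
weights = np.random.rand(1, T)  # stands in for auxiliary_input
outputs = np.random.rand(T, H)  # stands in for lstm_out
context = weights.dot(outputs)  # (1, H); the Reshape above flattens it to (H,)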
Ejemplo n.º 57
def sequential_from_Model(m, n_classes):

    s = Sequential()
    # Block 1
    s.add(Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv1', W_regularizer=l2(0.05), input_shape=(3, 224, 224)))
    s.add(Convolution2D(64, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.05), name='block1_conv2'))
    s.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))
    # Block 2
    s.add(Convolution2D(128, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.02), name='block2_conv1'))
    s.add(Convolution2D(128, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.02), name='block2_conv2'))
    s.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))
    # Block 3
    s.add(Convolution2D(256, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.02), name='block3_conv1'))
    s.add(Convolution2D(256, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.02), name='block3_conv2'))
    s.add(Convolution2D(256, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.03), name='block3_conv3'))
    s.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))
    # Block 4
    s.add(Convolution2D(512, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.03), name='block4_conv1'))
    s.add(Convolution2D(512, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.03), name='block4_conv2'))
    s.add(Convolution2D(512, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.05), name='block4_conv3'))
    s.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))
    # Block 5
    s.add(Convolution2D(512, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.03), name='block5_conv1'))
    s.add(Convolution2D(512, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.05), name='block5_conv2'))
    s.add(Convolution2D(512, 3, 3, activation='relu', border_mode='same', W_regularizer=l2(0.05), name='block5_conv3'))
    s.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))

    # s.set_weights(m.get_weights())

    s.add(Flatten())
    s.add(Dense(100, activation='sigmoid', W_regularizer=l2(0.05), activity_regularizer=activity_l2(0.05)))
    s.add(Dense(30, activation='sigmoid', W_regularizer=l2(0.05), activity_regularizer=activity_l2(0.05)))
    s.add(Dropout(0.5))
    s.add(Dense(n_classes, activation='softmax', W_regularizer=l2(0.05), activity_regularizer=activity_l2(0.05)))

    return s
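A hedged usage note: the m argument is only consumed by the commented-out set_weights call, so any placeholder works until weights are copied over:

s = sequential_from_Model(m=None, n_classes=10)
s.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])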
	X_test_keras = tokenizer.texts_to_sequences([' '.join(l) for l in data[4]])
	X_valid_keras = tokenizer.texts_to_sequences([' '.join(l) for l in data[2]])
	X_train_keras = tokenizer.sequences_to_matrix(X_train_keras)
	X_test_keras = tokenizer.sequences_to_matrix(X_test_keras)
	X_valid_keras = tokenizer.sequences_to_matrix(X_valid_keras)

	n_classes = np.max(y_train) + 1

	Y_train = np_utils.to_categorical(y_train, n_classes)
	Y_test = np_utils.to_categorical(y_test, n_classes)
	Y_valid = np_utils.to_categorical(y_valid, n_classes)

	print('KERAS...')
	### MLP
	model = Sequential()
	model.add(Dense(output_dim=2048, input_dim=X_test_keras.shape[1], init='glorot_normal', W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
	model.add(Activation('tanh'))
	model.add(Dense(output_dim=256, input_dim=2048, init='glorot_normal', W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
	model.add(Activation('tanh'))
	model.add(Dense(output_dim=n_classes, init='glorot_normal'))
	model.add(Activation('softmax'))
	### LSTM
	#model = Sequential()
	#model.add(Embedding(X_train.shape[1], 100))
	#model.add(LSTM(100))
	#model.add(Dropout(0.5))
	#model.add(Dense(5))
	#model.add(Activation('sigmoid'))
	### CNN
	#max_features = 5000
	#maxlen = 100
Ejemplo n.º 59
testfile = argv[2]

test_arch = np.load(testfile)
train_arch = np.load(trainfile)
train_features = train_arch['features']
train_labels = train_arch['labels']
test_features = test_arch['features']
ids = test_arch['ids']

#assert len(ids) == len(test_features)
print "loaded data"
res = np.zeros((len(ids),2))

num_neurons = 30
model = Sequential()
model.add(Dense(num_neurons, input_dim=train_features.shape[1], W_regularizer=l2(.01), activity_regularizer=activity_l2(.01)))
model.add(Activation('sigmoid'))

for i in xrange(3):
    model.add(Dense(num_neurons, input_dim=num_neurons, W_regularizer=l2(.01), activity_regularizer=activity_l2(.01)))
    # model.add(Dropout(.5))
    model.add(Activation('sigmoid'))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(optimizer='sgd', loss='binary_crossentropy')

model.fit(train_features, train_labels, validation_split=.1, verbose=1, nb_epoch=20)
print "Trained"
tpreds = np.ravel(model.predict(train_features))
print np.log(tpreds).dot(train_labels) / len(ids)
Ejemplo n.º 60
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
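The head of create_dataset is cut off above; a conventional reconstruction of the full windowing helper, assuming it follows the standard look_back pattern that the visible tail matches:

import numpy

def create_dataset(dataset, look_back=1):
    # slide a window of look_back values and predict the value that follows it
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)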


trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainY = trainY.reshape(len(trainY), 1)
testY = testY.reshape(len(testY), 1)

model = Sequential()
model.add(Dense(4, input_dim=look_back, init=my_init))
model.add(
    Dense(1, W_regularizer=l2(reg), activity_regularizer=activity_l2(reg)))
sgd = SGD(
    lr=learning_rate,
    momentum=momentum,
    decay=decay_rate,
    nesterov=False,
)

model.compile(loss='mean_squared_error', optimizer=sgd)

history = LossHistory()
lrate = LearningRateScheduler(step_decay)

model.fit(trainX,
          trainY,
          nb_epoch=epochs,