Example 1
 def build_model(self):
     # Remark: Sharing weights for the embedding is not supported,
     # so the model takes a concatenated input and slices it to split the two texts.
     input = Input(name='input',
                   shape=(self.text1_length + self.text2_length, ))
     embedding = Embedding(self.vocab_size,
                           self.embed_size,
                           weights=self.embed_weights,
                           trainable=self.train_embed)(input)
     # Split the concatenated embedding back into the two texts.
     query_embed = embedding.slice(1, 0, self.text1_length)
     doc_embed = embedding.slice(1, self.text1_length, self.text2_length)
     mm = A.batch_dot(query_embed, doc_embed,
                      axes=[2, 2])  # Translation Matrix.
     KM = []
     for i in range(self.kernel_num):
         mu = 1. / (self.kernel_num - 1) + (2. * i) / (self.kernel_num - 1) - 1.0
         sigma = self.sigma
         if mu > 1.0:  # Exact match.
             sigma = self.exact_sigma
             mu = 1.0
         # Gaussian kernel, then soft-TF pooling: sum over the doc axis, log, sum over the query axis.
         mm_exp = A.exp((-0.5) * (mm - mu) * (mm - mu) / sigma / sigma)
         mm_doc_sum = A.sum(mm_exp, axis=2)
         mm_log = A.log(mm_doc_sum + 1.0)
         # Remark: Keep the reduced dimension for the last sum and squeeze after the stack.
         # Otherwise, when batch=1, the output becomes a scalar, which is not compatible with stack.
         mm_sum = A.sum(mm_log, axis=1, keepDims=True)
         KM.append(mm_sum)
     Phi = Squeeze(2)(A.stack(KM))
     if self.target_mode == "ranking":
         output = Dense(1, init="uniform")(Phi)
     else:
         output = Dense(1, init="uniform", activation="sigmoid")(Phi)
     model = Model(input=input, output=output)
     return model
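
The loop above places the kernel centers evenly on [-1, 1] and clamps the last one to 1.0 for exact matches. A quick standalone check of that placement (kernel_num=11 is an assumed value, not from the source):

# Sketch: reproduce the kernel centers computed in the loop above.
kernel_num = 11  # assumed; any value > 1 works
for i in range(kernel_num):
    mu = 1. / (kernel_num - 1) + (2. * i) / (kernel_num - 1) - 1.0
    print(i, round(min(mu, 1.0), 2))  # prints -0.9, -0.7, ..., 0.9, 1.0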
Example 2
 def build_model(self):
     model = Sequential()
     model.add(Dense(24, input_dim=self.state_size, activation='relu'))
     model.add(Dense(24, activation='relu'))
     model.add(Dense(self.action_size, activation='linear'))
     model.summary()
     model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
     return model
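
A minimal usage sketch for this DQN-style network; the agent instance and the values state_size=4, action_size=2 are assumptions, not from the source:

import numpy as np
model = agent.build_model()           # 'agent' is a hypothetical instance of the class above
state = np.random.rand(1, 4)          # a single state, shape (1, state_size)
q_values = model.predict(state)       # shape (1, action_size)
action = int(np.argmax(q_values[0]))  # greedy action selection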
Example 3
 def _build_model(sequence_length):
     model = Sequential()
     model.add(Embedding(20, 10, input_length=sequence_length))
     model.add(Convolution1D(4, 3))
     model.add(Flatten())
     model.add(Dense(5, activation="softmax"))
     return model
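
The builder returns an uncompiled model, so training needs a compile step first. A minimal sketch with dummy data (the shapes and loss choice are assumptions, following the Keras-1-style API used throughout these examples):

import numpy as np
model = _build_model(sequence_length=7)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
x_dummy = np.random.randint(0, 20, size=(8, 7))  # token ids below the vocab size of 20
y_dummy = np.random.randint(0, 5, size=(8, 1))   # one of the 5 softmax classes
model.fit(x_dummy, y_dummy, nb_epoch=1)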
Example 4
 def _build_model(self):
     print("Now we build the model")
     model = Sequential()
     model.add(Convolution2D(32, 8, 8, subsample=(4, 4), border_mode='same',
                             input_shape=(IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS)))  # 80*80*4
     model.add(Activation('relu'))
     model.add(Convolution2D(64, 4, 4, subsample=(2, 2), border_mode='same'))
     model.add(Activation('relu'))
     model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same'))
     model.add(Activation('relu'))
     model.add(Flatten())
     model.add(Dense(512))
     model.add(Activation('relu'))
     model.add(Dense(2))
     
     # Get the 1 x 2 output representing each action's probability.
     model.add(Activation('softmax'))
     return model
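
A usage sketch for this network; the agent instance is hypothetical, while the 80x80x4 input shape comes from the comment above:

import numpy as np
model = agent._build_model()       # 'agent' is a hypothetical instance of the class above
frames = np.zeros((1, 80, 80, 4))  # one stack of four 80x80 frames
probs = model.predict(frames)      # shape (1, 2): one probability per action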
Example 5
        ImageCenterCrop(224, 224),
        ImageChannelNormalize(123.0, 117.0, 104.0),
        ImageMatToTensor(),
        ImageFeatureToTensor()
    ])

    full_model = Net.load_bigdl(model_path)
    # Create a new model by removing the layers after pool5/drop_7x7_s1.
    model = full_model.new_graph(["pool5/drop_7x7_s1"])
    # Freeze the layers from the input up to pool4/3x3_s2, inclusive.
    model.freeze_up_to(["pool4/3x3_s2"])

    inputNode = Input(name="input", shape=(3, 224, 224))
    inception = model.to_keras()(inputNode)
    flatten = Flatten()(inception)
    logits = Dense(2)(flatten)

    lrModel = Model(inputNode, logits)

    classifier = NNClassifier(lrModel, CrossEntropyCriterion(), transformer) \
        .setLearningRate(0.003).setBatchSize(40).setMaxEpoch(1).setFeaturesCol("image")

    pipeline = Pipeline(stages=[classifier])

    catdogModel = pipeline.fit(trainingDF)
    predictionDF = catdogModel.transform(validationDF).cache()
    predictionDF.show()

    correct = predictionDF.filter("label=prediction").count()
    overall = predictionDF.count()
    accuracy = correct * 1.0 / overall
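
A small follow-up sketch to report the result (the column names follow the code above):

print("Validation accuracy: %.4f" % accuracy)
predictionDF.select("label", "prediction").show(10)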
Example 6
# Build the model
model = Sequential()

model.add(LSTM(
    input_shape=(x_train.shape[1], x_train.shape[-1]),
    output_dim=20,
    return_sequences=True))
model.add(Dropout(0.2))

model.add(LSTM(
    10,
    return_sequences=False))
model.add(Dropout(0.2))

model.add(Dense(
    output_dim=1))

model.compile(loss='mse', optimizer='rmsprop')

%%time
# Train the model
print("Training begins.")
model.fit(
    x_train,
    y_train,
    batch_size=1024,
    nb_epoch=20)
print("Training completed.")

# Create the list of differences between the predictions and the test data.
diff = []
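
The example cuts off at the empty diff list; a plausible continuation, purely as a sketch (x_test and y_test are assumed to exist alongside x_train and y_train):

predictions = model.predict(x_test)
# Absolute error per sample.
diff = [abs(p - t) for p, t in zip(predictions.flatten(), y_test.flatten())]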
Example 7
for layer in full_model.layers:
    print(layer.name())
model = full_model.new_graph(["pool5/drop_7x7_s1"])

# The returned model's output layer is "pool5/drop_7x7_s1".
# ### Freeze some layers
# We freeze the layers from the input up to pool4/3x3_s2, inclusive.

model.freeze_up_to(["pool4/3x3_s2"])

# ### Add a few new layers

inputNode = Input(name="input", shape=(3, 224, 224))
inception = model.to_keras()(inputNode)
flatten = Flatten()(inception)
logits = Dense(5)(flatten)
lrModel = Model(inputNode, logits)
classifier = NNClassifier(
    lrModel, CrossEntropyCriterion(),
    transformer).setLearningRate(0.003).setBatchSize(56).setMaxEpoch(
        1).setFeaturesCol("image").setCachingSample(False)
pipeline = Pipeline(stages=[classifier])

# # Train the model
# Transfer learning can finish in a few minutes.

catdogModel = pipeline.fit(trainingDF)
predictionDF = catdogModel.transform(validationDF).cache()

predictionDF.select("AdoptionSpeed",
                    "prediction").sort("AdoptionSpeed",
Example 8
        nb_row=CONVOLVE_2_KERNEL_SIZE,  # size: 12 - 5 + 1 = 8.
        nb_col=CONVOLVE_2_KERNEL_SIZE,
        activation="relu",
        W_regularizer=L2Regularizer(args.penalty_rate)))
convolve_net.add(
    AveragePooling2D(
        pool_size=(
            POOLING_2_WINDOW_SIZE,  # size: 8 / 2 = 4.
            POOLING_2_WINDOW_SIZE),
        strides=(POOLING_2_STRIDE_SIZE, POOLING_2_STRIDE_SIZE),
    ))
convolve_net.add(BatchNormalization())
convolve_net.add(Flatten())  # size: 4 * 4 * 2 -> 32
convolve_net.add(
    Dense(
        output_dim=FC_LINEAR_DIMENSION,  # size: 32 -> 64.
        activation="sigmoid",
        W_regularizer=L2Regularizer(args.penalty_rate)))
convolve_net.add(Dropout(args.dropout_rate))

# BigDL does not support parameter sharing, so this workaround is used.
both_feature = TimeDistributed(layer=convolve_net,
                               input_shape=input_shape)(both_input)

encode_left = both_feature.index_select(1, 0)
encode_right = both_feature.index_select(1, 1)

distance = autograd.abs(encode_left - encode_right)
predict = Dense(output_dim=NUM_CLASS_LABEL,
                activation="sigmoid",
                W_regularizer=L2Regularizer(args.penalty_rate))(distance)
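
A shape walkthrough of the weight-sharing workaround above (the 28x28 single-channel input is an assumption, not from the source):

# both_input:   (batch, 2, 1, 28, 28)            -- the two images stacked on axis 1
# both_feature: (batch, 2, FC_LINEAR_DIMENSION)  -- shared convolve_net applied to each
# encode_left:  (batch, FC_LINEAR_DIMENSION)     -- index_select(1, 0)
# encode_right: (batch, FC_LINEAR_DIMENSION)     -- index_select(1, 1)
# distance:     (batch, FC_LINEAR_DIMENSION)     -- elementwise |left - right|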
Example 9
convolve_net.add(
    Convolution2D(
        nb_filter=LAYER_2_NUM_CHANNEL,  # channels: 8 -> 2.
        nb_row=CONVOLVE_2_KERNEL_SIZE,  # size: 12 - 5 + 1 = 8.
        nb_col=CONVOLVE_2_KERNEL_SIZE,
        activation="relu"))
convolve_net.add(
    AveragePooling2D(
        pool_size=(
            POOLING_2_WINDOW_SIZE,  # size: 8 / 2 = 4.
            POOLING_2_WINDOW_SIZE),
        strides=(POOLING_2_STRIDE_SIZE, POOLING_2_STRIDE_SIZE)))
convolve_net.add(Flatten())  # size: 4 * 4 * 2 -> 32
convolve_net.add(
    Dense(
        output_dim=FC_LINEAR_DIMENSION,  # size: 32 -> 64.
        activation="sigmoid"))

# BigDL does not support parameter sharing, so this workaround is used.
both_feature = TimeDistributed(layer=convolve_net,
                               input_shape=input_shape)(both_input)

encode_left = both_feature.index_select(1, 0)
encode_right = both_feature.index_select(1, 1)

distance = autograd.abs(encode_left - encode_right)
predict = Dense(output_dim=NUM_CLASS_LABEL, activation="sigmoid")(distance)

siamese_net = Model(input=both_input, output=predict)
siamese_net.compile(optimizer="adam",
                    loss='sparse_categorical_crossentropy',
Example 10
df = pd.read_csv("../resources/datasets/dataset-1_converted.csv")

trainDf, testDf = train_test_split(df, test_size=0.2)
print("Created Train and Test Df\n")

predictionColumn = 'slotOccupancy'

x = trainDf.drop(columns=[predictionColumn])
inputs = len(x.columns)

y = trainDf[[predictionColumn]]
outputs = len(y.columns)

model = Sequential()
model.add(Dense(output_dim=inputs, activation="relu", input_shape=(inputs, )))
model.add(Dense(output_dim=inputs, activation="relu"))
model.add(Dense(output_dim=outputs))

model.compile(optimizer="adam", loss="mean_squared_error")

model.summary()
print("Created Sequential Model!\n")

xNumpy = x.to_numpy()
yNumpy = y.to_numpy()
# model.fit(x=xNumpy, y=yNumpy, nb_epoch=1, distributed=False)

import tensorflow as tf

weights = np.array(model.get_weights(), dtype=object)
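
The tensorflow import followed by get_weights() suggests porting the trained weights into an equivalent tf.keras model. A sketch of that idea (the mirror model is an assumption; weight layouts may need transposing between backends):

# Hypothetical tf.keras mirror of the Sequential model above.
tf_model = tf.keras.Sequential([
    tf.keras.layers.Dense(inputs, activation="relu", input_shape=(inputs,)),
    tf.keras.layers.Dense(inputs, activation="relu"),
    tf.keras.layers.Dense(outputs),
])
tf_model.set_weights(list(weights))  # assumes matching layer order and shapes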
Example 11
    .set("spark.sql.warehouse.dir", "file:///C:/Spark/temp") \
    .set("spark.sql.streaming.checkpointLocation", "file:///C:/Spark/checkpoint") \
    .set("spark.sql.execution.arrow.enabled", "true")
    #.set("spark.sql.execution.arrow.maxRecordsPerBatch", "") # Utsav: Tweak only if memory limits are known. Default = 10,000

spark = SparkSession.builder \
    .config(conf=conf) \
    .getOrCreate()

# Init the BigDL engine
init_engine()

parkingInput2 = Input(shape=(inputs,))
print(parkingInput2.shape)

denseLayer2 = Dense(output_dim=inputs, activation="relu")
hidden2 = denseLayer2(parkingInput2)

lastLayer2 = Dense(output_dim=outputs, activation="relu")(hidden2)
zooModel = Model(input=parkingInput2, output=lastLayer2, name="functionalModel2")
# model2 = Model(inputs=[parkingInput2], outputs=[lastLayer2])

log_dir = "../resources/board/model_log"
app_name = "zooKeras"
zooModel.set_tensorboard(log_dir=log_dir, app_name=app_name)

zooModel.compile(optimizer='adam', loss='mean_squared_error')

zooModel.fit(x=x.to_numpy(), y=y.to_numpy(), nb_epoch=2, distributed=False)
zooModel.summary()
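
With set_tensorboard configured above, the logged loss can be read back through the Analytics Zoo API, or TensorBoard can be pointed at log_dir. A sketch (get_train_summary is assumed to be available in this Analytics Zoo build):

import numpy as np
train_loss = np.array(zooModel.get_train_summary("Loss"))
print(train_loss[:5])  # rows of (step, loss value, wall-clock time)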