def test_var(self):
    """Smoke-test TimeDistributed over a Dense layer.

    Builds a (None, 2, 20) input, applies a shared Dense(30) across the
    time dimension, takes the elementwise |t1 - t2| of the two time steps,
    checks the resulting shapes, and runs a forward pass on random data.
    """
    # Renamed from `input` to avoid shadowing the builtin `input`.
    inp = Input(shape=[2, 20])
    time = TimeDistributed(layer=Dense(30))(inp)
    # Select the two slices along axis 1 (the "time" axis).
    t1 = time.index_select(1, 0)
    t2 = time.index_select(1, 1)
    diff = auto.abs(t1 - t2)
    assert diff.get_output_shape() == (None, 30)
    assert diff.get_input_shape() == (None, 30)
    model = Model(inp, diff)
    data = np.random.uniform(0, 1, [10, 2, 20])
    output = model.forward(data)
    print(output.shape)
convolve_net.add(Flatten()) # 尺寸: 4 * 4 * 2 -> 32 convolve_net.add( Dense( output_dim=FC_LINEAR_DIMENSION, # 尺寸: 32 -> 64. activation="sigmoid", W_regularizer=L2Regularizer(args.penalty_rate))) convolve_net.add(Dropout(args.dropout_rate)) # BigDL 不支持 parameter sharing, 不得已而为之. both_feature = TimeDistributed(layer=convolve_net, input_shape=input_shape)(both_input) encode_left = both_feature.index_select(1, 0) encode_right = both_feature.index_select(1, 1) distance = autograd.abs(encode_left - encode_right) predict = Dense(output_dim=NUM_CLASS_LABEL, activation="sigmoid", W_regularizer=L2Regularizer(args.penalty_rate))(distance) siamese_net = Model(input=both_input, output=predict) # 声明优化器, 训练并测试模型. optimizer = Optimizer(model=siamese_net, training_rdd=train_rdd, optim_method=Adam(args.learning_rate), criterion=CrossEntropyCriterion(), end_trigger=MaxEpoch(args.num_epoch), batch_size=args.batch_size) optimizer.set_validation(batch_size=args.batch_size, val_rdd=test_rdd,
def _to_tensor(self):
    """Return the absolute value of the first model input's zvalue."""
    return autograd.abs(self.model_inputs[0].zvalue)