# NOTE(review): this chunk begins mid-statement — the layer constructor and the
# assignment target (presumably `both_feature`, given the uses below) are cut
# off before this view. The fragment is kept verbatim; restore the missing
# opening from the full file.
input_shape=input_shape)(both_input)

# Split the stacked pair back into the two twin-branch encodings.
# assumes both_feature has a pair dimension at axis 1 (e.g. (batch, 2, dim)) — TODO confirm
encode_left = both_feature.index_select(1, 0)
encode_right = both_feature.index_select(1, 1)

# Element-wise absolute difference between the twin encodings
# (Analytics Zoo autograd op), used as the distance feature.
distance = autograd.abs(encode_left - encode_right)

# Classification head: sigmoid Dense layer with an L2 weight penalty.
predict = Dense(output_dim=NUM_CLASS_LABEL, activation="sigmoid", W_regularizer=L2Regularizer(args.penalty_rate))(distance)
siamese_net = Model(input=both_input, output=predict)

# Declare the optimizer, then train and evaluate the model.
# (original comment was in Chinese: "声明优化器, 训练并测试模型.")
optimizer = Optimizer(model=siamese_net, training_rdd=train_rdd, optim_method=Adam(args.learning_rate), criterion=CrossEntropyCriterion(), end_trigger=MaxEpoch(args.num_epoch), batch_size=args.batch_size)

# Validate on the test RDD after every epoch using top-1 accuracy.
optimizer.set_validation(batch_size=args.batch_size, val_rdd=test_rdd, trigger=EveryEpoch(), val_method=[Top1Accuracy()])

# Configure training logs, viewable with TensorBoard.
# (original comment was in Chinese: "设置训练日志, 可用 TensorBoard 查询.")
app_name = "logs"
optimizer.set_train_summary(TrainSummary(log_dir=".", app_name=app_name))
optimizer.set_val_summary(ValidationSummary(log_dir=".", app_name=app_name))

# Kick off training.
optimizer.optimize()
batch_size=args.batch_size) optimizer.set_validation(batch_size=args.batch_size, val_rdd=test_rdd, trigger=EveryEpoch(), val_method=[Top1Accuracy()]) # Create logs. app_name = "logs" optimizer.set_train_summary(TrainSummary(log_dir=".", app_name=app_name)) optimizer.set_val_summary(ValidationSummary(log_dir=".", app_name=app_name)) # Call the optimizer to start training the model. print('\n\nModel training started!') print('\n\nPipeline: Intel Analytics Zoo') print('Starting to train the model on Intel BigDL') print('Paramaters: Shared\n\n') pokemon_model = optimizer.optimize() print('\n\nModel training finished!\n\n') # Make the predictions. predictions = pokemon_model.predict(test_rdd).collect() print('\n\nThe predictions are\n') print('-------------------------------------------------\n') print('Encoding - 1\tEncoding - 2\tInference?\n') print('-------------------------------------------------\n\n') for i in predictions: if (abs(i[0] - i[1]) < 0.1): print(i[0], '\t', i[1], '\tSame Pokemon') else: print(i[0], '\t', i[1], '\tDifferent Pokemon')