I have been trying to run the following code with the new Spline jar, za.co.absa.spline.agent.spark:spark-3.0-spline-agent-bundle_2.12:0.6.0, but I keep getting an error about UserExtraMetadataProvider, which has been deprecated in newer versions. I also tried replacing UserExtraMetadataProvider with UserExtraAppendingPostProcessingFilter, using the code shown below the first code block, but I still get errors. Could you verify this and share how to correctly write the post-processing filter code with the new Spline bundle?
%scala
import za.co.absa.spline.harvester.conf.StandardSplineConfigurationStack
import za.co.absa.spline.harvester.extra.UserExtraMetadataProvider
import za.co.absa.spline.harvester.HarvestingContext
import org.apache.commons.configuration.Configuration
import za.co.absa.spline.harvester.SparkLineageInitializer._
import za.co.absa.spline.harvester.conf.DefaultSplineConfigurer
import za.co.absa.spline.producer.model._
import scala.util.parsing.json.JSON

val splineConf: Configuration = StandardSplineConfigurationStack(spark)

spark.enableLineageTracking(new DefaultSplineConfigurer(splineConf) {
  //override protected def userExtraMetadataProvider = new UserExtraMetaDataProvider {
  //val test = dbutils.notebook.getContext.notebookPath
  val notebookInformationJson = dbutils.notebook.getContext.toJson
  val outerMap = JSON.parseFull(notebookInformationJson).getOrElse(0).asInstanceOf[Map[String, String]]
  val tagMap = outerMap("tags").asInstanceOf[Map[String, String]]
  val extraContextMap = outerMap("extraContext").asInstanceOf[Map[String, String]]
  val notebookPath = extraContextMap("notebook_path").split("/")
  val notebookURL = tagMap("browserHostName") + "/?o=" + tagMap("orgId") + tagMap("browserHash")
  val user = tagMap("user")
  val name = notebookPath(notebookPath.size - 1)
  val notebookInfo = Map(
    "notebookURL" -> notebookURL,
    "user" -> user,
    "name" -> name,
    "mounts" -> dbutils.fs.ls("/mnt").map(_.path),
    "timestamp" -> System.currentTimeMillis)
  val notebookInfoJson = scala.util.parsing.json.JSONObject(notebookInfo)

  override protected def userExtraMetadataProvider: UserExtraMetadataProvider = new UserExtraMetadataProvider {
    override def forExecEvent(event: ExecutionEvent, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar1")
    override def forExecPlan(plan: ExecutionPlan, ctx: HarvestingContext): Map[String, Any] = Map("notebookInfo" -> notebookInfoJson) // add mount info to searchAndReplace; this function carries the info
    override def forOperation(op: ReadOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar3")
    override def forOperation(op: WriteOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar4")
    override def forOperation(op: DataOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar5")
  }
})
Here is the updated code, which still produces errors:
%scala
import za.co.absa.spline.harvester.conf.StandardSplineConfigurationStack
import za.co.absa.spline.harvester.extra.UserExtraMetadataProvider
import za.co.absa.spline.harvester.HarvestingContext
import org.apache.commons.configuration.Configuration
import za.co.absa.spline.harvester.SparkLineageInitializer._
import za.co.absa.spline.harvester.conf.DefaultSplineConfigurer
import za.co.absa.spline.producer.model._
import play.api.libs.json._

val splineConf: Configuration = StandardSplineConfigurationStack(spark)

spark.enableLineageTracking(new DefaultSplineConfigurer(splineConf) {
  val notebookInformationJson = Json.toJson(dbutils.notebook.getContext)
  val outerMap = Json.toJson(notebookInformationJson).getOrElse(0).asInstanceOf[Map[String, String]]
  val tagMap = outerMap("tags").asInstanceOf[Map[String, String]]
  val extraContextMap = outerMap("extraContext").asInstanceOf[Map[String, String]]
  val notebookPath = extraContextMap("notebook_path").split("/")
  val notebookURL = tagMap("browserHostName") + "/?o=" + tagMap("orgId") + tagMap("browserHash")
  val user = tagMap("user")
  val name = notebookPath(notebookPath.size - 1)
  val notebookInfo = Map(
    "notebookURL" -> Json.toJson(notebookURL),
    "user" -> Json.toJson(user),
    "name" -> Json.toJson(name),
    "mounts" -> Json.toJson(dbutils.fs.ls("/mnt").map(_.path)),
    "timestamp" -> Json.toJson(System.currentTimeMillis))
  val notebookInfoJson = Json.toJson(notebookInfo)

  def userExtraMetadataProvider: UserExtraAppendingPostProcessingFilter
  = new UserExtraAppendingPostProcessingFilter
  {
    def processExecutionEvent(event: ExecutionEvent, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar1")
    def processExecutionPlan(plan: ExecutionPlan, ctx: HarvestingContext): Map[String, Any] = Map("notebookInfo" -> notebookInfoJson)
    def processReadOperation(op: ReadOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar3")
    def processWriteOperation(op: WriteOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar4")
    def processDataOperation(op: DataOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar5")
  }
})
The errors are as follows:
command-2044409137370707:12: error: not enough arguments for constructor DefaultSplineConfigurer: (sparkSession: org.apache.spark.sql.SparkSession, userConfiguration: org.apache.commons.configuration.Configuration)za.co.absa.spline.harvester.conf.DefaultSplineConfigurer.
Unspecified value parameter userConfiguration.
spark.enableLineageTracking(new DefaultSplineConfigurer(splineConf) {
^
command-2044409137370707:32: error: not found: type UserExtraAppendingPostProcessingFilter
def userExtraMetadataProvider: UserExtraAppendingPostProcessingFilter
^
command-2044409137370707:33: error: not found: type UserExtraAppendingPostProcessingFilter
= new UserExtraAppendingPostProcessingFilter
^
command-2044409137370707:37: error: not found: type ExecutionEvent
def processExecutionEvent(event: ExecutionEvent, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar1")
^
command-2044409137370707:38: error: not found: type ExecutionPlan
def processExecutionPlan (plan: ExecutionPlan, ctx: HarvestingContext): Map[String, Any] = Map("notebookInfo" -> notebookInfoJson)
^
command-2044409137370707:39: error: not found: type ReadOperation
def processReadOperation(op: ReadOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar3")
^
command-2044409137370707:40: error: not found: type WriteOperation
def processWriteOperation(op: WriteOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar4")
^
command-2044409137370707:41: error: not found: type DataOperation
def processDataOperation(op: DataOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar5")
^
command-2044409137370707:36: warning: a pure expression does nothing in statement position; multiline expressions may require enclosing parentheses
{
^
Your code doesn't compile for several reasons:

- You are missing some imports (the error log indicates this clearly):

  import za.co.absa.spline.producer.model.v1_1._
  import za.co.absa.spline.harvester.extra.UserExtraAppendingPostProcessingFilter

- The correct signature of the extra metadata provider hook is:

  protected def maybeUserExtraMetadataProvider: Option[UserExtraMetadataProvider]

- UserExtraAppendingPostProcessingFilter is just an adapter for the deprecated UserExtraMetadataProvider, so you still need to create a provider instance: new UserExtraAppendingPostProcessingFilter(new UserExtraMetadataProvider() { // ??? }); see the sketch below.
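For illustration, constructing the adapter could look like this (a minimal sketch, assuming the 0.6.0 interfaces shown in the question; the val name extraFilter is arbitrary, the callback bodies are the placeholder values from the question, and wiring the filter into the configurer is not shown):

import za.co.absa.spline.harvester.HarvestingContext
import za.co.absa.spline.harvester.extra.{UserExtraAppendingPostProcessingFilter, UserExtraMetadataProvider}
import za.co.absa.spline.producer.model.v1_1._

// The filter only adapts the deprecated provider interface,
// so the actual logic still lives in a UserExtraMetadataProvider.
val extraFilter = new UserExtraAppendingPostProcessingFilter(
  new UserExtraMetadataProvider {
    override def forExecEvent(event: ExecutionEvent, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar1")
    override def forExecPlan(plan: ExecutionPlan, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar2")
    override def forOperation(op: ReadOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar3")
    override def forOperation(op: WriteOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar4")
    override def forOperation(op: DataOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar5")
  })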
Note that we are working on a declarative solution for capturing extra metadata, so that most rules and values can be defined in the configuration with little or no coding required. See https://github.com/AbsaOSS/spline-spark-agent/issues/169
For now, simply use UserExtraMetadataProvider.
For more details see https://github.com/AbsaOSS/spline-spark-agent/discussions/228#discussioncomment-819620
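Putting the pieces together, the question's setup might be rewritten along these lines (a sketch assembled from the points above, not verified against 0.6.0; the Databricks notebook-metadata extraction is abbreviated to a placeholder map):

%scala
import org.apache.commons.configuration.Configuration
import za.co.absa.spline.harvester.HarvestingContext
import za.co.absa.spline.harvester.SparkLineageInitializer._
import za.co.absa.spline.harvester.conf.{DefaultSplineConfigurer, StandardSplineConfigurationStack}
import za.co.absa.spline.harvester.extra.UserExtraMetadataProvider
import za.co.absa.spline.producer.model.v1_1._

val splineConf: Configuration = StandardSplineConfigurationStack(spark)

// The 0.6.0 constructor takes the SparkSession as its first argument
// (see the "not enough arguments" error above).
spark.enableLineageTracking(new DefaultSplineConfigurer(spark, splineConf) {

  // Placeholder; build this from dbutils.notebook.getContext as in the question.
  val notebookInfoJson = scala.util.parsing.json.JSONObject(Map(
    "timestamp" -> System.currentTimeMillis))

  // The hook is now optional and is named maybeUserExtraMetadataProvider.
  override protected def maybeUserExtraMetadataProvider: Option[UserExtraMetadataProvider] =
    Some(new UserExtraMetadataProvider {
      override def forExecEvent(event: ExecutionEvent, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar1")
      override def forExecPlan(plan: ExecutionPlan, ctx: HarvestingContext): Map[String, Any] = Map("notebookInfo" -> notebookInfoJson)
      override def forOperation(op: ReadOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar3")
      override def forOperation(op: WriteOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar4")
      override def forOperation(op: DataOperation, ctx: HarvestingContext): Map[String, Any] = Map("foo" -> "bar5")
    })
})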