I am trying to access a file stored in an S3 bucket from PySpark code, and it gives me the error message attached below. The program works fine when processing locally stored files. I have tried s3://, s3a://, and s3n://, but none of them seem to work.
Code:
ACCESS_KEY = "*********"
SECRET_KEY = "**********"
# URL-encode any slashes in the secret key before embedding it in the URL
EncodedSecretKey = SECRET_KEY.replace("/", "%2F")
s3url = "s3n://" + ACCESS_KEY + ":" + EncodedSecretKey + "@" + bucket_name + "/" + file_name
sqlContext.read.option("delimiter", delimiter).load(s3url,
                                                    format='com.databricks.spark.csv',
                                                    header='true',
                                                    inferSchema='true')
Error message:
Traceback (most recent call last):
  File "C:\Users\sachari\AppData\Local\Temp\zeppelin_pyspark-5481670497409059953.py", line 367, in <module>
    raise Exception(traceback.format_exc())
Exception: Traceback (most recent call last):
  File "C:\Users\sachari\AppData\Local\Temp\zeppelin_pyspark-5481670497409059953.py", line 355, in <module>
    exec(code, _zcUserQueryNameSpace)
  File "<stdin>", line 14, in <module>
  File "<stdin>", line 10, in get_df
  File "C:\zeppelin\interpreter\spark\pyspark\pyspark.zip\pyspark\sql\readwriter.py", line 149, in load
    return self._df(self._jreader.load(path))
  File "C:\zeppelin\interpreter\spark\pyspark\py4j-0.10.4-src.zip\py4j\java_gateway.py", line 1133, in __call__
    answer, self.gateway_client, self.target_id, self.name)
  File "C:\zeppelin\interpreter\spark\pyspark\pyspark.zip\pyspark\sql\utils.py", line 63, in deco
    return f(*a, **kw)
  File "C:\zeppelin\interpreter\spark\pyspark\py4j-0.10.4-src.zip\py4j\protocol.py", line 319, in get_return_value
    format(target_id, ".", name), value)
Py4JJavaError: An error occurred while calling o537.load.
: java.io.IOException: No FileSystem for scheme: s3n
: java.io.IOException: No FileSystem for scheme: s3n
at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2584)
at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2591)
at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:91)
at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2630)
at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2612)
at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:370)
at org.apache.hadoop.fs.Path.getFileSystem(Path.java:296)
at org.apache.spark.sql.execution.datasources.DataSource$$anonfun$14.apply(DataSource.scala:372)
at org.apache.spark.sql.execution.datasources.DataSource$$anonfun$14.apply(DataSource.scala:370)
at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
at scala.collection.immutable.List.foreach(List.scala:381)
at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)
at scala.collection.immutable.List.flatMap(List.scala:344)
at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:370)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:152)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:135)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
at java.lang.reflect.Method.invoke(Unknown Source)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:280)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Unknown Source)
The java.io.IOException: No FileSystem for scheme: s3n means Hadoop has no S3 filesystem implementation on the classpath. You have to load the AWS package; for the PySpark shell you can load it as follows, and the same flag also works with the spark-submit command:
pyspark --packages org.apache.hadoop:hadoop-aws:2.7.1
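For example, an equivalent spark-submit invocation would look like this (your_script.py is a placeholder for your own application):
spark-submit --packages org.apache.hadoop:hadoop-aws:2.7.1 your_script.py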
Or
You have to set the credentials as shown in the following link:
https://hadoop.apache.org/docs/r2.7.2/hadoop-aws/tools/hadoop-aws/index.html
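As a minimal sketch (assuming a Spark 2.x setup with the hadoop-aws package on the classpath, and reusing the ACCESS_KEY, SECRET_KEY, bucket_name, file_name, and delimiter variables from the question), you can set the credentials on the Hadoop configuration instead of embedding them in the URL:

# Sketch: configure the s3a connector via the Hadoop configuration.
# ACCESS_KEY, SECRET_KEY, bucket_name, file_name and delimiter are
# placeholders taken from the question's code.
hadoop_conf = sc._jsc.hadoopConfiguration()
hadoop_conf.set("fs.s3a.access.key", ACCESS_KEY)
hadoop_conf.set("fs.s3a.secret.key", SECRET_KEY)

# Read the CSV over s3a:// with the same options as before
df = (sqlContext.read
      .option("delimiter", delimiter)
      .option("header", "true")
      .option("inferSchema", "true")
      .csv("s3a://" + bucket_name + "/" + file_name))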