Pivot and aggregate a PySpark DataFrame with alias



I have a PySpark DataFrame similar to the following:

df = sc.parallelize([
("c1", "A", 3.4, 0.4, 3.5), 
("c1", "B", 9.6, 0.0, 0.0),
("c1", "A", 2.8, 0.4, 0.3),
("c1", "B", 5.4, 0.2, 0.11),
("c2", "A", 0.0, 9.7, 0.3), 
("c2", "B", 9.6, 8.6, 0.1),
("c2", "A", 7.3, 9.1, 7.0),
("c2", "B", 0.7, 6.4, 4.3)
]).toDF(["user_id", "type", "d1", 'd2', 'd3'])
df.show()

which gives:

+-------+----+---+---+----+
|user_id|type| d1| d2|  d3|
+-------+----+---+---+----+
|     c1|   A|3.4|0.4| 3.5|
|     c1|   B|9.6|0.0| 0.0|
|     c1|   A|2.8|0.4| 0.3|
|     c1|   B|5.4|0.2|0.11|
|     c2|   A|0.0|9.7| 0.3|
|     c2|   B|9.6|8.6| 0.1|
|     c2|   A|7.3|9.1| 7.0|
|     c2|   B|0.7|6.4| 4.3|
+-------+----+---+---+----+

I have aggregated the results with sum(), pivoting on the type column:

data_wide = (df.groupBy('user_id')
             .pivot('type').sum())
data_wide.show()

which gives:

+-------+-----------------+------------------+-----------+------------------+-----------+------------------+
|user_id|      A_sum(`d1`)|       A_sum(`d2`)|A_sum(`d3`)|       B_sum(`d1`)|B_sum(`d2`)|       B_sum(`d3`)|
+-------+-----------------+------------------+-----------+------------------+-----------+------------------+
|     c1|6.199999999999999|               0.8|        3.8|              15.0|        0.2|              0.11|
|     c2|              7.3|18.799999999999997|        7.3|10.299999999999999|       15.0|4.3999999999999995|
+-------+-----------------+------------------+-----------+------------------+-----------+------------------+

Now, the resulting column names contain the `(`, `)` and backtick characters. This is a problem when, for example, feeding these new columns into a VectorAssembler, because it returns a syntax error in attribute name. For this reason I need to rename the columns, but calling the withColumnRenamed method inside a loop or inside a reduce(lambda...) function takes a lot of time (my df actually has 11.520 columns).
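
For reference, this is roughly the kind of per-column rename I am trying to avoid (the replacement pattern below is only illustrative, not my actual code):

from functools import reduce
# One withColumnRenamed call per column; each call produces a new DataFrame,
# which becomes very slow with thousands of columns
data_renamed = reduce(
    lambda d, c: d.withColumnRenamed(c, c.replace('sum(`', '').replace('`)', '')),
    data_wide.columns,
    data_wide)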

Is there a way to avoid these characters in the pivot + aggregation step, or to recursively assign an alias that depends on the name of the new pivoted column?

Thanks in advance

You can do the renaming within the aggregation for the pivot, using alias:

import pyspark.sql.functions as f
data_wide = (df.groupBy('user_id')
             .pivot('type')
             .agg(*[f.sum(x).alias(x) for x in df.columns if x not in {"user_id", "type"}]))
data_wide.show()
#+-------+-----------------+------------------+----+------------------+----+------------------+
#|user_id|             A_d1|              A_d2|A_d3|              B_d1|B_d2|              B_d3|
#+-------+-----------------+------------------+----+------------------+----+------------------+
#|     c1|6.199999999999999|               0.8| 3.8|              15.0| 0.2|              0.11|
#|     c2|              7.3|18.799999999999997| 7.3|10.299999999999999|15.0|4.3999999999999995|
#+-------+-----------------+------------------+----+------------------+----+------------------+

However, this is really no different from doing the pivot and renaming afterwards. Here is the execution plan for this method:

#== Physical Plan ==
#HashAggregate(keys=[user_id#0], functions=[pivotfirst(type#1, sum(`d1`) AS `d1`#169, A, B, 0, 0), pivotfirst(type#1, sum(`d2`) 
#AS `d2`#170, A, B, 0, 0), pivotfirst(type#1, sum(`d3`) AS `d3`#171, A, B, 0, 0)])
#+- Exchange hashpartitioning(user_id#0, 200)
#   +- HashAggregate(keys=[user_id#0], functions=[partial_pivotfirst(type#1, sum(`d1`) AS `d1`#169, A, B, 0, 0), partial_pivotfirst(type#1, sum(`d2`) AS `d2`#170, A, B, 0, 0), partial_pivotfirst(type#1, sum(`d3`) AS `d3`#171, A, B, 0, 0)])
#      +- *HashAggregate(keys=[user_id#0, type#1], functions=[sum(d1#2), sum(d2#3), sum(d3#4)])
#         +- Exchange hashpartitioning(user_id#0, type#1, 200)
#            +- *HashAggregate(keys=[user_id#0, type#1], functions=[partial_sum(d1#2), partial_sum(d2#3), partial_sum(d3#4)])
#               +- Scan ExistingRDD[user_id#0,type#1,d1#2,d2#3,d3#4]

Compare this with the approach of pivoting first and renaming afterwards, as in this answer:

import re
def clean_names(df):
    p = re.compile(r"^(\w+?)_([a-z]+)\((\w+)\)(?:\(\))?")
    return df.toDF(*[p.sub(r"\1_\3", c) for c in df.columns])

pivoted = df.groupBy('user_id').pivot('type').sum()
clean_names(pivoted).explain()
#== Physical Plan ==
#HashAggregate(keys=[user_id#0], functions=[pivotfirst(type#1, sum(`d1`)#363, A, B, 0, 0), pivotfirst(type#1, sum(`d2`)#364, A, B, 0, 0), pivotfirst(type#1, sum(`d3`)#365, A, B, 0, 0)])
#+- Exchange hashpartitioning(user_id#0, 200)
#   +- HashAggregate(keys=[user_id#0], functions=[partial_pivotfirst(type#1, sum(`d1`)#363, A, B, 0, 0), partial_pivotfirst(type#1, sum(`d2`)#364, A, B, 0, 0), partial_pivotfirst(type#1, sum(`d3`)#365, A, B, 0, 0)])
#      +- *HashAggregate(keys=[user_id#0, type#1], functions=[sum(d1#2), sum(d2#3), sum(d3#4)])
#         +- Exchange hashpartitioning(user_id#0, type#1, 200)
#            +- *HashAggregate(keys=[user_id#0, type#1], functions=[partial_sum(d1#2), partial_sum(d2#3), partial_sum(d3#4)])
#               +- Scan ExistingRDD[user_id#0,type#1,d1#2,d2#3,d3#4]

You'll see that the two are practically identical. You may get a tiny speed-up by avoiding the regular expression, but it will be negligible compared to the cost of the pivot itself.
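
For completeness, here is a minimal sketch (assuming pyspark.ml is available) showing that the backtick-free names produced by either approach can be fed straight into a VectorAssembler, which was the original problem:

from pyspark.ml.feature import VectorAssembler
# All pivoted columns except the grouping key become features
feature_cols = [c for c in data_wide.columns if c != 'user_id']
assembler = VectorAssembler(inputCols=feature_cols, outputCol='features')
assembler.transform(data_wide).select('user_id', 'features').show(truncate=False)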

Wrote a quick and easy function to rename PySpark pivot tables. Enjoy! :)

# This function efficiently renames a pivot table's ugly default column names
def rename_pivot_cols(rename_df, remove_agg):
    """Change a Spark pivot table's default ugly column names with ease.
    Option 1: remove_agg = True:  `2_sum(sum_amt)` --> `sum_amt_2`
    Option 2: remove_agg = False: `2_sum(sum_amt)` --> `sum_sum_amt_2`
    """
    for column in rename_df.columns:
        if remove_agg == True:
            start_index = column.find('(')
            end_index = column.find(')')
            if (start_index > 0 and end_index > 0):
                rename_df = rename_df.withColumnRenamed(column, column[start_index+1:end_index] + '_' + column[:1])
        else:
            new_column = column.replace('(', '_').replace(')', '')
            rename_df = rename_df.withColumnRenamed(column, new_column[2:] + '_' + new_column[:1])
    return rename_df
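
A minimal usage sketch, assuming your Spark version produces pivot column names without backticks (e.g. A_sum(d1)), as in the docstring above:

pivoted = df.groupBy('user_id').pivot('type').sum()
# e.g. 'A_sum(d1)' -> 'd1_A', 'B_sum(d3)' -> 'd3_B'
rename_pivot_cols(pivoted, remove_agg=True).show()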
