A Spark SQL job failed with the alert below.
The problematic SQL:
select g.dt, frequent , wk , hr , user_id , k.`$name` as user_name , os , manufacturer , page_name , page_url , regexp_replace(button_name,'\n|\r|\t','') as button_name , button_type , first_visit_time , last_visit_time , pv , session_cnt , page_cnt , session_dur , total_dur , load_dur , max_load_dur , min_load_dur , search_content , search_cnt , max_search_dur , min_search_dur , total_search_dur , max_search_cnt , page_visit_dur , buy_time , error_reason , type , uv , father , son , index,g.dt
from (
  select dt , frequent , wk , hr , user_id , os , manufacturer , page_name , page_url , button_name , button_type , first_visit_time , last_visit_time , pv , session_cnt , page_cnt , session_dur , total_dur , load_dur , max_load_dur , min_load_dur , search_content , search_cnt , max_search_dur , min_search_dur , total_search_dur , max_search_cnt , page_visit_dur , buy_time , error_reason , type , uv , father , son , index from day_total
  union all select * from hour_total
  union all select * from day_page
  union all select * from day_button
  union all select * from hour_error
  union all select * from launch
  union all select * from decision
  union all select * from visit_back
  union all select * from province
  union all select * from os
  union all select * from manufacturer
  union all select * from roadmap1
  union all select * from roadmap2
) g
left join users k on g.user_id = k.id
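The duplicate sits in the outer SELECT list: g.dt is the first column and appears again at the very end (..., index,g.dt). A duplicate like this can be caught before the write by inspecting the result's column names. A minimal sketch, assuming a SparkSession named spark and the query text in a variable named sql (both hypothetical names); by default the writer's duplicate check is case-insensitive (spark.sql.caseSensitive=false), so the names are lower-cased before grouping:

// Sketch: list column names that occur more than once in the query result.
// `spark` (SparkSession) and `sql` (the query string) are assumed names.
val df = spark.sql(sql)
val duplicated = df.columns
  .map(_.toLowerCase)                 // match the writer's case-insensitive check
  .groupBy(identity)
  .collect { case (name, hits) if hits.length > 1 => name }
duplicated.foreach(name => println(s"duplicate column: $name"))   // for this query it would print: duplicate column: dt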
Error details:
Exception in thread "main" org.apache.spark.sql.AnalysisException: Found duplicate column(s) when inserting into hdfs://nameservice1/origin_data/events_7/data: `dt`;
    at org.apache.spark.sql.util.SchemaUtils$.checkColumnNameDuplication(SchemaUtils.scala:85)
    at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:65)
    at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:104)
    at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:102)
    at org.apache.spark.sql.execution.command.DataWritingCommandExec.doExecute(commands.scala:122)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute.apply(SparkPlan.scala:131)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute.apply(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery.apply(SparkPlan.scala:155)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
    at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:80)
    at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:80)
    at org.apache.spark.sql.DataframeWriter$$anonfun$runCommand.apply(DataframeWriter.scala:668)
    at org.apache.spark.sql.DataframeWriter$$anonfun$runCommand.apply(DataframeWriter.scala:668)
    at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId.apply(SQLExecution.scala:78)
    at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
    at org.apache.spark.sql.DataframeWriter.runCommand(DataframeWriter.scala:668)
    at org.apache.spark.sql.DataframeWriter.saveToV1Source(DataframeWriter.scala:276)
    at org.apache.spark.sql.DataframeWriter.save(DataframeWriter.scala:270)
    at org.apache.spark.sql.DataframeWriter.save(DataframeWriter.scala:228)
    at org.apache.spark.sql.DataframeWriter.csv(DataframeWriter.scala:656)
    at com.tcl.kudu.crumb_applet$.main(crumb_applet.scala:476)
    at com.tcl.kudu.crumb_applet.main(crumb_applet.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
    at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:851)
    at org.apache.spark.deploy.SparkSubmit.doRunMain(SparkSubmit.scala:167)
    at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:195)
    at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
    at org.apache.spark.deploy.SparkSubmit$$anon.doSubmit(SparkSubmit.scala:926)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:935)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)

2. Fixing the problem
The outermost SELECT of the final query lists the same dt column twice: g.dt at the start of the select list and again at the end. After removing one of the two g.dt references, the job ran successfully.
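For reference, a minimal sketch of the write path after the fix (the variable names and the save mode are assumptions, not the original crumb_applet.scala code): once the select list carries a single dt, the duplicate-name check in InsertIntoHadoopFsRelationCommand passes and the csv write succeeds.

// Sketch of the corrected write; `spark`, `fixedSql` and the overwrite mode are assumed.
val fixed = spark.sql(fixedSql)   // fixedSql: the query above with the trailing g.dt removed
require(fixed.columns.map(_.toLowerCase).distinct.length == fixed.columns.length,
  "schema still contains duplicate column names")
fixed.write
  .mode("overwrite")
  .csv("hdfs://nameservice1/origin_data/events_7/data")

If both occurrences of dt were actually needed, aliasing one of them (for example g.dt as dt2) would also satisfy the duplicate-column check.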