```bash
./spark-sql --master local[4] \
  --jars /home/hadoop/software/mysql-connector-java-5.1.27-bin.jar \
  --driver-class-path /home/hadoop/software/mysql-connector-java-5.1.27-bin.jar
```
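Once the shell is up, you can run SQL statements directly against the metastore. A minimal check, with a hypothetical table name:

```sql
-- run inside the spark-sql shell started above
show databases;
show tables;
-- some_table is a placeholder; use a table from your own metastore
select * from some_table limit 10;
```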
## Submitting a Spark job

```bash
./bin/spark-submit --class test001 --master local \
  /home/hadoop/jars/com.xx.bigdata-2.0.jar \
  /home/hadoop/data/84-0.txt /home/hadoop/data/result
```
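After the job finishes, you can inspect the output directory passed as the second argument. `saveAsTextFile` produces one `part-*` file per partition plus a `_SUCCESS` marker:

```bash
ls /home/hadoop/data/result
head /home/hadoop/data/result/part-00000
```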
## Spark getting-started example (word count)

```scala
import org.apache.spark.{SparkConf, SparkContext}

object test001 {
  def main(args: Array[String]): Unit = {
    // import the session implicits if you need toDF on an RDD (not needed in this RDD-only example)
    val input_path = args(0)
    val output_path = args(1)
    val conf = new SparkConf().setAppName(this.getClass.getSimpleName).setMaster("local[4]")
    val sc = new SparkContext(conf)
    val data = sc.textFile(input_path)
    // split each line into words, count occurrences, sort by count descending
    val result = data.flatMap(_.split(" ")).map(x => (x, 1)).reduceByKey(_ + _).sortBy(x => x._2, false)
    result.saveAsTextFile(output_path)
    sc.stop()
  }
}
```
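Note that `saveAsTextFile` throws if the output directory already exists, so remove `/home/hadoop/data/result` between runs. If you only want to see the top words rather than write a directory, a minimal sketch using the `result` RDD from above:

```scala
// print the ten most frequent words instead of writing part files
result.take(10).foreach { case (word, count) => println(s"$word\t$count") }
```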
## Spark reading and writing MySQL

Maven: add the corresponding dependencies for reading/writing MySQL:

```xml
<dependency>
    <groupId>com.typesafe</groupId>
    <artifactId>config</artifactId>
    <version>1.3.3</version>
</dependency>
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
    <version>5.1.47</version>
</dependency>
```
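The `com.typesafe:config` artifact is not actually used in the example below; it is commonly pulled in to keep JDBC settings out of the source. A minimal sketch, assuming a hypothetical `src/main/resources/application.conf` with `db.url`, `db.user`, and `db.password` keys:

```scala
import com.typesafe.config.ConfigFactory

object DbConfig {
  // loads application.conf from the classpath; the db.* keys are hypothetical
  private val conf = ConfigFactory.load()
  val url: String      = conf.getString("db.url")
  val user: String     = conf.getString("db.user")
  val password: String = conf.getString("db.password")
}
```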
```scala
import java.util.Properties

import org.apache.spark.sql.{SaveMode, SparkSession}

object session_source_jdbc {
  def main(args: Array[String]): Unit = {
    val session = SparkSession.builder().master("local[4]").appName("read_jdbc").getOrCreate()
    import session.implicits._

    // read the Hive metastore's TBLS table from MySQL over JDBC
    val url = "jdbc:mysql://192.168.2.123:3306/hadoop_hive?useUnicode=true&characterEncoding=UTF-8"
    val table = "TBLS"
    val reader = session.read.format("jdbc").
      option("url", url).
      option("dbtable", table).
      option("driver", "com.mysql.jdbc.Driver").
      option("user", "root").
      option("password", "root")
    val frame = reader.load()

    // register a temp view and filter it with Spark SQL
    frame.createOrReplaceTempView("temp1")
    val frame1 = session.sql("select TBL_ID,CREATE_TIME,OWNER from temp1 where SD_ID<=8")
    frame1.show()

    // write the result back to a local MySQL instance
    val url_local = "jdbc:mysql://localhost:3306/mysql001?useUnicode=true&characterEncoding=UTF-8"
    val prop = new Properties()
    prop.setProperty("user", "root")
    prop.setProperty("password", "123456")
    println("MySQL connection successful")
    // frame1.write.mode(SaveMode.Append).jdbc(url_local, "spark2myql", prop)
    println("-----")
    session.stop()
  }

  // unused in this example
  case class person(name: String, age: Int)
}
```
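The example above loads the entire TBLS table and filters it in Spark. The JDBC source also accepts a parenthesized subquery as `dbtable`, which pushes the projection and filter down to MySQL; a sketch reusing the same connection settings:

```scala
// variant: let MySQL evaluate the filter instead of Spark
val pushed = session.read.format("jdbc").
  option("url", url).
  option("dbtable", "(select TBL_ID, CREATE_TIME, OWNER from TBLS where SD_ID <= 8) t").
  option("driver", "com.mysql.jdbc.Driver").
  option("user", "root").
  option("password", "root").
  load()
pushed.show()
```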