Big Data with Spark: Quick Start Overview
I. Hands-On Practice
1. Add the Scala Plugin
The examples here are written in Scala, so first install the Scala plugin in the IDE (for example, IntelliJ IDEA) so that Scala sources can be compiled and run.
2. Add Dependencies
<dependencies>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-core_2.12</artifactId>
        <version>3.0.0</version>
    </dependency>
</dependencies>
<build>
    <plugins>
        <!-- This plugin compiles Scala code into class files -->
        <plugin>
            <groupId>net.alchim31.maven</groupId>
            <artifactId>scala-maven-plugin</artifactId>
            <version>3.2.2</version>
            <executions>
                <execution>
                    <!-- Declare the binding to Maven's compile phase -->
                    <goals>
                        <goal>testCompile</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-assembly-plugin</artifactId>
            <version>3.1.0</version>
            <configuration>
                <descriptorRefs>
                    <descriptorRef>jar-with-dependencies</descriptorRef>
                </descriptorRefs>
            </configuration>
            <executions>
                <execution>
                    <id>make-assembly</id>
                    <phase>package</phase>
                    <goals>
                        <goal>single</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>
3. WordCount
To get an intuitive feel for what the Spark framework does, let's implement WordCount, the most common teaching example in big data courses.
package com.spack.bigdata.core.wc

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * WordCount implementation (groupBy version)
 */
object Spark02_WordCount {
  def main(args: Array[String]): Unit = {
    // Establish the connection to the Spark framework,
    // analogous to obtaining a JDBC Connection in an application
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("WordCount")
    val sc = new SparkContext(conf)

    // TODO Business logic
    // 1. Read the files and get the data line by line, e.g. "hello world"
    val lines: RDD[String] = sc.textFile("datas")
    println(lines) // prints the RDD's string representation, not its contents

    // 2. Split each line into individual words (tokenization)
    //    "hello world" => hello, world
    val words: RDD[String] = lines.flatMap(_.split(" "))

    // 3. Group the data by word to make counting easier
    //    (hello, hello, hello), (world, world)
    val wordGroup: RDD[(String, Iterable[String])] = words.groupBy(word => word)

    // 4. Transform the grouped data
    //    (hello, (hello, hello, hello)), (world, (world, world))
    //    => (hello, 3), (world, 2)
    val wordToCount = wordGroup.map {
      // word is the key, list is the group of occurrences of that word
      case (word, list) =>
        (word, list.size)
    }

    // Collect the results to the driver and print them
    val array: Array[(String, Int)] = wordToCount.collect()
    array.foreach(println)

    // TODO Close the connection
    sc.stop()
  }
}
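The groupBy-then-count logic above does not depend on any Spark-specific API, so it can be tried out on plain Scala collections first. Below is a minimal sketch of the same four steps on an in-memory list; the sample lines and the object name GroupByCountSketch are made up for illustration.

object GroupByCountSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical in-memory "lines", standing in for the contents of the datas directory
    val lines = List("hello world", "hello spark")

    // Same steps as above: split into words, group by word, take the size of each group
    val words: List[String] = lines.flatMap(_.split(" "))
    val wordGroup: Map[String, List[String]] = words.groupBy(word => word)
    val wordToCount: Map[String, Int] = wordGroup.map { case (word, list) => (word, list.size) }

    wordToCount.foreach(println) // e.g. (hello,2), (world,1), (spark,1)
  }
}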
package com.spack.bigdata.core.wc

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * WordCount implementation (groupBy + reduce version)
 */
object Spark03_WordCount {
  def main(args: Array[String]): Unit = {
    // Establish the connection to the Spark framework,
    // analogous to obtaining a JDBC Connection in an application
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("WordCount")
    val sc = new SparkContext(conf)

    // TODO Business logic
    // 1. Read the files and get the data line by line
    val lines: RDD[String] = sc.textFile("datas")
    println(lines) // prints the RDD's string representation, not its contents

    // 2. Split each line into individual words (tokenization)
    //    "hello world" => hello, world
    val words: RDD[String] = lines.flatMap(_.split(" "))

    // Map each word to a (word, 1) pair
    val wordToOne = words.map(
      word => (word, 1)
    )

    // 3. Group the pairs by word to make counting easier
    //    (hello, ((hello,1), (hello,1), (hello,1))), (world, ((world,1), (world,1)))
    val wordGroup: RDD[(String, Iterable[(String, Int)])] = wordToOne.groupBy(
      t => t._1
    )

    // 4. Transform the grouped data: reduce each group of (word, 1) pairs
    //    by keeping the word and summing the counts
    val wordToCount = wordGroup.map {
      case (word, list) =>
        list.reduce(
          (t1, t2) => (t1._1, t1._2 + t2._2)
        )
    }

    // Collect the results to the driver and print them
    val array: Array[(String, Int)] = wordToCount.collect()
    array.foreach(println)

    // TODO Close the connection
    sc.stop()
  }
}
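The reduce call in step 4 can look odd at first because it folds (word, count) tuples rather than plain numbers. The following minimal sketch shows what a single group produced by groupBy reduces to; the list contents and the object name are made up for illustration.

object TupleReduceSketch {
  def main(args: Array[String]): Unit = {
    // One group produced by groupBy: every pair carries the same key "hello"
    val list = List(("hello", 1), ("hello", 1), ("hello", 1))

    // Keep the key of the first tuple and add up the counts
    val result: (String, Int) = list.reduce(
      (t1, t2) => (t1._1, t1._2 + t2._2)
    )

    println(result) // (hello,3)
  }
}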
package com.spack.bigdata.core.wc

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * WordCount implementation (reduceByKey version)
 */
object Spark04_WordCount {
  def main(args: Array[String]): Unit = {
    // Establish the connection to the Spark framework,
    // analogous to obtaining a JDBC Connection in an application
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("WordCount")
    val sc = new SparkContext(conf)

    // TODO Business logic
    // 1. Read the files and get the data line by line
    val lines: RDD[String] = sc.textFile("datas")
    println(lines) // prints the RDD's string representation, not its contents

    // 2. Split each line into individual words (tokenization)
    //    "hello world" => hello, world
    val words: RDD[String] = lines.flatMap(_.split(" "))

    // Map each word to a (word, 1) pair
    val wordToOne = words.map(
      word => (word, 1)
    )

    // Spark offers a single method that combines grouping and aggregation:
    // reduceByKey reduces the values of all pairs that share the same key.
    // Equivalent spellings:
    // wordToOne.reduceByKey((x, y) => { x + y })
    // wordToOne.reduceByKey((x, y) => x + y)
    val wordToCount = wordToOne.reduceByKey(_ + _)

    // Collect the results to the driver and print them
    val array: Array[(String, Int)] = wordToCount.collect()
    array.foreach(println)

    // TODO Close the connection
    sc.stop()
  }
}
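All three versions read their input from a datas directory in the project root. If that directory has not been prepared yet, the same reduceByKey pipeline can still be verified against a small in-memory collection via sc.parallelize. The object name Spark05_WordCount_Parallelize and the sample lines below are made up for illustration.

package com.spack.bigdata.core.wc

import org.apache.spark.{SparkConf, SparkContext}

object Spark05_WordCount_Parallelize {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("WordCount")
    val sc = new SparkContext(conf)

    // Hypothetical in-memory input standing in for sc.textFile("datas")
    val lines = sc.parallelize(Seq("hello world", "hello spark"))

    // Same pipeline as Spark04_WordCount
    val wordToCount = lines
      .flatMap(_.split(" "))
      .map(word => (word, 1))
      .reduceByKey(_ + _)

    wordToCount.collect().foreach(println) // e.g. (hello,2), (world,1), (spark,1)

    sc.stop()
  }
}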
A large amount of log output is produced during execution. To make the program's results easier to read, create a log4j.properties file in the project's resources directory and add the following logging configuration:
log4j.rootCategory=ERROR, console
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
# Set the default spark-shell log level to ERROR. When running the spark-shell, the
# log level for this class is used to overwrite the root logger's log level, so that
# the user can have different defaults for the shell and regular Spark apps.
log4j.logger.org.apache.spark.repl.Main=ERROR
# Settings to quiet third party logs that are too verbose
log4j.logger.org.spark_project.jetty=ERROR
log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=ERROR
log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=ERROR
log4j.logger.org.apache.parquet=ERROR
log4j.logger.parquet=ERROR
# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
4. Exception Handling
If the local operating system is Windows and the program uses Hadoop-related functionality, for example writing files to HDFS, an exception will be thrown (typically a complaint that winutils.exe cannot be located in the Hadoop binaries).
This is not a bug in the program itself; it happens because Hadoop-related services are being used on a Windows system. The fix is simply to configure the corresponding Windows system dependency and make it visible to the program.
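One common way to do this is to download a winutils.exe build that matches the Hadoop version in use, place it under a bin directory, and point the HADOOP_HOME environment variable (or the hadoop.home.dir system property) at that bin directory's parent. A minimal sketch, assuming the hypothetical path C:\hadoop\bin\winutils.exe:

object WindowsHadoopSetup {
  def main(args: Array[String]): Unit = {
    // Must be set before any Hadoop/Spark code touches the local file system.
    // "C:\\hadoop" is a hypothetical directory that contains bin\winutils.exe.
    System.setProperty("hadoop.home.dir", "C:\\hadoop")

    // ... then create the SparkConf / SparkContext as in the examples above ...
  }
}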