
Sorting with sortByKey and sortBy



Basic sorting with sortByKey

sortByKey orders a pair RDD by its key. To rank words by count, swap each (word, count) pair to (count, word), sort descending, then swap back:

package com.aura.liu.Dayof20

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}

object SparkSortOps {
  def main(args: Array[String]): Unit = {
    // Lower the log level for this Spark application only
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.spark_project").setLevel(Level.WARN)

    val conf = new SparkConf()
      .setAppName(s"${SparkSortOps.getClass.getSimpleName}")
      .setMaster("local[2]")
    val sc = new SparkContext(conf)

    val list = List(
      "hello you",
      "i hate you",
      "i miss you",
      "i love you",
      "fuck you"
    )
    val words = sc.parallelize(list).flatMap(_.split("\\s+"))

    val ret = words.map((_, 1)).reduceByKey(_ + _)

    ret.map { case (key, count) => (count, key) } // swap so the count becomes the key
      .sortByKey(false, 1)                        // false = descending, 1 output partition
      .map { case (count, key) => (key, count) }  // swap back to (word, count)
      .foreach(println)

    sc.stop()
  }
}
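Because sortByKey resolves its key ordering implicitly, an alternative to passing false is to put a reversed Ordering in scope. A minimal sketch, assuming the same ret RDD of (word, count) pairs as in the listing above; Ordering.Int.reverse and the implicit lookup are standard Scala/Spark behavior:

implicit val descendingInt: Ordering[Int] = Ordering.Int.reverse

ret.map { case (key, count) => (count, key) }
  .sortByKey(true, 1) // "ascending" under the reversed ordering, i.e. counts descending
  .map { case (count, key) => (key, count) }
  .foreach(println)

Spark's OrderedRDDFunctions uses the implicit Ordering in the closest scope, so the local val overrides the default Ordering.Int and the same call site now sorts descending.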

Custom sorting with sortBy

sortBy sorts by an arbitrary projection of each record, and its second argument list accepts an explicit Ordering, so no key swap is needed. The first call below sorts on the count column alone; the second sorts the whole (word, count) pair with a comparator that orders by count and breaks ties on the word:

package com.aura.liu.Dayof20

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.reflect.{ClassTag, classTag}

object SparkSortOps1 {
  def main(args: Array[String]): Unit = {
    // Lower the log level for this Spark application only
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.spark_project").setLevel(Level.WARN)

    val conf = new SparkConf()
      .setAppName(s"${SparkSortOps1.getClass.getSimpleName}")
      .setMaster("local[2]")
    val sc = new SparkContext(conf)

    val list = List("hello you", "i hate you", "i miss you", "i love you", "fuck you")
    val words = sc.parallelize(list).flatMap(_.split("\\s+"))
    val ret = words.map((_, 1)).reduceByKey(_ + _)

    // Sort on the count column, passing an explicit Ordering that reverses
    // the comparison so the result is descending
    var sortedRDD: RDD[(String, Int)] = ret.sortBy(t => t._2, true, 1)(
      new Ordering[Int]() {
        override def compare(x: Int, y: Int) = y.compareTo(x)
      },
      ClassTag.Int
    )
    sortedRDD.foreach(println)

    println("====== custom ordering over two columns ======")
    // Sort descending by count; break ties by comparing the word, also descending
    sortedRDD = ret.sortBy(t => t, true, 1)(
      new Ordering[(String, Int)]() {
        override def compare(x: (String, Int), y: (String, Int)) = {
          var ret = y._2.compareTo(x._2)
          if (ret == 0) {
            ret = y._1.compareTo(x._1)
          }
          ret
        }
      },
      classTag[(String, Int)]
    )
    sortedRDD.foreach(println)

    sc.stop()
  }
}
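The anonymous Ordering subclasses above can usually be replaced by Scala's built-in orderings, and when only the top N records are needed, takeOrdered avoids sorting the whole RDD. A minimal sketch under the same assumptions (the ret RDD of (word, count) pairs from the listing above); sortBy's default implicits, Ordering.by, and takeOrdered are standard Spark/Scala APIs:

// Descending by count, ties broken by word descending: the natural tuple
// ordering compares count first, then word, and ascending = false reverses both.
val twoColumnDesc: RDD[(String, Int)] =
  ret.sortBy(t => (t._2, t._1), ascending = false, numPartitions = 1)
twoColumnDesc.foreach(println)

// Top 3 words by count without a full sort: Ordering.by projects each pair
// onto its count, and .reverse makes the largest counts come first.
ret.takeOrdered(3)(Ordering.by[(String, Int), Int](_._2).reverse)
  .foreach(println)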
