scala> import org.apache.spark.streaming._
import org.apache.spark.streaming._
scala> val stc = new StreamingContext(sc, Seconds(3))
stc: org.apache.spark.streaming.StreamingContext = org.apache.spark.streaming.StreamingContext@4374cd21
scala> val lines = stc.socketTextStream("localhost", 9999)
lines: org.apache.spark.streaming.dstream.ReceiverInputDStream[String] = org.apache.spark.streaming.dstream.SocketInputDStream@1e3ef59f
scala> val words = lines.flatMap(_.split(" "))
words: org.apache.spark.streaming.dstream.DStream[String] = org.apache.spark.streaming.dstream.FlatMappedDStream@61cbbfd7
scala> val pairs = words.map(word => (word, 1))
pairs: org.apache.spark.streaming.dstream.DStream[(String, Int)] = org.apache.spark.streaming.dstream.MappedDStream@5b8088e
scala> // Non-windowed alternative: val wordCounts = pairs.reduceByKey(_ + _)
scala> val wordCounts = pairs.reduceByKeyAndWindow((x: Int, y: Int) => x + y,
     | Seconds(15), Seconds(3))
wordCounts: org.apache.spark.streaming.dstream.DStream[(String, Int)] = org.apache.spark.streaming.dstream.ShuffledDStream@7ca28846
scala> wordCounts.foreachRDD { rdd =>
     | println("-------------------")
     | rdd.foreach(println)
     | }
scala> stc.start()
scala> stc.awaitTermination()
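The same pipeline can also be packaged as a standalone application rather than typed into the shell. Below is a minimal sketch; the object name NetworkWordCount and the app name are illustrative, and unlike the shell, where sc is predefined, a standalone program must build its own StreamingContext from a SparkConf.

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object NetworkWordCount {
  def main(args: Array[String]): Unit = {
    // 3-second batch interval, matching the shell session above.
    val conf = new SparkConf().setAppName("NetworkWordCount")
    val stc = new StreamingContext(conf, Seconds(3))

    val lines = stc.socketTextStream("localhost", 9999)
    val words = lines.flatMap(_.split(" "))
    val pairs = words.map(word => (word, 1))

    // Count each word over the last 15 seconds, recomputed every 3 seconds.
    val wordCounts = pairs.reduceByKeyAndWindow((x: Int, y: Int) => x + y,
      Seconds(15), Seconds(3))

    wordCounts.foreachRDD { rdd =>
      println("-------------------")
      rdd.foreach(println)
    }

    stc.start()
    stc.awaitTermination()
  }
}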
Input from netcat (nc). The -l flag puts nc in listen mode on port 9999 and -k keeps the listener open across connections; every line typed below is delivered to the socket stream.
mountain@mountain:~$ nc -lk 9999
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
Output from spark-shell. Every 3 seconds the window slides and the counts for the preceding 15 seconds are printed, so each number appears in at most five consecutive batches (15 s window / 3 s slide) before it falls out of the window.
-------------------
(3,1)
(5,1)
(1,1)
(6,1)
(4,1)
(2,1)
-------------------
(2,1)
(4,1)
(6,1)
(3,1)
(5,1)
(9,1)
(1,1)
(8,1)
(7,1)
-------------------
(5,1)
(4,1)
(6,1)
(2,1)
(13,1)
(8,1)
(11,1)
(3,1)
(9,1)
(12,1)
(1,1)
(7,1)
(10,1)
-------------------
(2,1)
(13,1)
(6,1)
(5,1)
(14,1)
(3,1)
(9,1)
(12,1)
(1,1)
(8,1)
(11,1)
(4,1)
(15,1)
(7,1)
(10,1)
-------------------
(5,1)
(2,1)
(13,1)
(6,1)
(14,1)
(3,1)
(4,1)
(15,1)
(9,1)
(12,1)
(1,1)
(8,1)
(11,1)
(7,1)
(10,1)
-------------------
(13,1)
(15,1)
(8,1)
(11,1)
(9,1)
(12,1)
(14,1)
(7,1)
(10,1)
-------------------
(11,1)
(13,1)
(15,1)
(12,1)
(14,1)
(10,1)
-------------------
(14,1)
(15,1)
-------------------
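Because consecutive windows share 12 of their 15 seconds, re-reducing the entire window on every slide is wasteful. Spark Streaming also offers an incremental variant of reduceByKeyAndWindow that takes an inverse reduce function: values entering the window are added and values leaving it are subtracted. A minimal sketch, continuing the session above and replacing the earlier wordCounts definition before stc.start(); this variant requires checkpointing, and the checkpoint path below is illustrative:

scala> stc.checkpoint("/tmp/spark-checkpoint")
scala> val wordCounts = pairs.reduceByKeyAndWindow(
     | (x: Int, y: Int) => x + y, // add counts entering the window
     | (x: Int, y: Int) => x - y, // subtract counts leaving the window
     | Seconds(15), Seconds(3))

Note that with this form, keys whose count has dropped to zero linger in the stream as (word, 0) pairs unless the overload that additionally accepts a filter function is used to drop them.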