====== Spark Introduction ======
This introduction shows several simple examples to give you an idea of what programming in Spark is like. See the official [[https://spark.apache.org/docs/latest/|Spark documentation]] for a more thorough introduction.
===== Running Spark Shell in Python =====
To run an interactive Python shell in local Spark mode, run (on your local workstation or on the cluster):
  PYSPARK_DRIVER_PYTHON=ipython3 pyspark
The ''PYSPARK_DRIVER_PYTHON=ipython3'' variable instructs Spark to use ''ipython3'' instead of the default ''python3'' as the interactive shell.
After a local Spark executor is started, the Python shell starts. A few lines above the prompt line, the Spark UI address is listed in the following format:
  Spark context Web UI available at http://<hostname>:<port>
The Spark UI is an HTML interface which displays the state of the application -- whether a distributed computation is taking place, how many workers are part of it, how many tasks are left to be processed, and any error logs; cached datasets and their properties (whether they are cached on disk or in memory, and their size) are also displayed.
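If the address scrolls away, it can also be obtained from the running shell; a minimal sketch using the ''sc'' SparkContext that the shell creates automatically:
<file python>
# Print the address of the Spark UI of the current application.
print(sc.uiWebUrl)       # e.g. http://<hostname>:4040
print(sc.applicationId)  # the application id shown in the UI
</file>
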
==== Running Spark Shell in Scala ====
The central object of the Spark framework is the RDD -- resilient distributed dataset. It contains an ordered sequence of items, which may be distributed across several threads or several computers. Spark offers multiple operations which can be performed on an RDD, like ''map'', ''flatMap'', ''reduceByKey'' or ''sortBy'' (all of them used in the examples below).
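As a minimal sketch (not part of the original word count example), an RDD can also be created directly from a local Python collection in the shell:
<file python>
# Distribute a local range into an RDD and apply a few operations.
nums = sc.parallelize(range(10))
squares = nums.map(lambda x: x * x)            # transformation, evaluated lazily
evens = squares.filter(lambda x: x % 2 == 0)   # another transformation
print(evens.collect())                         # action: [0, 4, 16, 36, 64]
</file>
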
We start with a simple word count example. We load the RDD from a text file, every line of the input file becoming an element of the RDD. We then split every line into words, count the occurrences of every word and sort the words by their number of occurrences.
<file python>
wiki = sc.textFile("/...")
words = wiki.flatMap(lambda line: line.split())
counts = words.map(lambda word: (word, 1)).reduceByKey(lambda c1, c2: c1 + c2)
sorted = counts.sortBy(lambda word_count: word_count[1], ascending=False)
sorted.saveAsTextFile("output")

# Alternatively, the same computation can be written as a single expression:
(sc.textFile("/...")
   .flatMap(lambda line: line.split())
   .map(lambda word: (word, 1))
   .reduceByKey(lambda c1, c2: c1 + c2)
   .sortBy(lambda word_count: word_count[1], ascending=False)
   .saveAsTextFile("output"))
</file>
The output of ''saveAsTextFile'' is a directory ''output'', which contains the resulting RDD written as one ''part-*'' file per partition.
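The saved result can also be loaded back into an RDD for a quick check; a minimal sketch (the ''output'' path is the directory created above):
<file python>
# Each part-* file of the "output" directory is read back, one line per element;
# the lines are the string representations of the (word, count) pairs.
result = sc.textFile("output")
print(result.take(3))
</file>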
+ | |||
+ | Note that '' | ||
The Scala version is quite similar:
<file scala>
val wiki = sc.textFile("/...")
val words = wiki.flatMap(line => line.split(" "))
val counts = words.map(word => (word, 1)).reduceByKey((c1, c2) => c1 + c2)
val sorted = counts.sortBy({case (word, count) => count}, false)
sorted.saveAsTextFile("output")

// Alternatively without variables and using placeholders in lambda parameters:
(sc.textFile("/...")
   .flatMap(_.split(" "))
   .map((_, 1))
   .reduceByKey(_ + _)
   .sortBy(_._2, false)
   .saveAsTextFile("output"))
</file>

===== K-Means Example =====
An example implementing the [[https://en.wikipedia.org/wiki/K-means_clustering|K-means clustering]] algorithm follows:
<file python>
import numpy as np

# Return the index of the center closest to the given point.
def closestPoint(point, centers):
    return min((np.sum((point - centers[i]) ** 2), i) for i in range(len(centers)))[1]

lines = sc.textFile("/...")
data = lines.map(lambda line: np.array([float(x) for x in line.split()])).cache()

K = 100
epsilon = 1e-3

centers = data.takeSample(False, K)  # Initialize centers as K random points.
for i in range(5):  # Perform 5 iterations
    old_centers = sc.broadcast(centers)
    centers = (data
               # For each point, find its closest center index.
               .map(lambda point: (closestPoint(point, old_centers.value), (point, 1)))
               # Sum points and counts in each cluster.
               .reduceByKey(lambda a, b: (a[0] + b[0], a[1] + b[1]))
               # Sort by cluster index.
               .sortByKey()
               # Compute the new centers by averaging points in clusters.
               .map(lambda index_sum_count: index_sum_count[1][0] / index_sum_count[1][1])
               .collect())

    # If the change in center positions is less than epsilon, stop.
    centers_change = sum(np.sqrt(np.sum((a - b)**2)) for (a, b) in zip(centers, old_centers.value))
    old_centers.unpersist()
    if centers_change < epsilon:
        break

print("Final centers: " + str(centers))
</file>
The implementation starts by loading the data points and caching them in memory using ''cache'', because they are iterated over repeatedly -- without caching, the input file would be re-read and re-parsed in every iteration.

Note that the explicit broadcasting used for the ''centers'' guarantees that the current centers are transferred to every worker only once per iteration; the tasks then read them through ''old_centers.value'' instead of capturing them in every task closure.

For illustration, the Scala version of the same example follows:
<file scala>
import breeze.linalg.Vector

type Vector = breeze.linalg.Vector[Double]
type Vectors = Array[Vector]

// Return the index of the center closest to the given point.
def closestPoint(point : Vector, centers : Vectors) : Int =
  centers.map(center => (center-point).norm(2)).zipWithIndex.min._2

val lines = sc.textFile("/...")
val data = lines.map(line => Vector(line.split(" ").map(_.toDouble))).cache()

val K = 100
val epsilon = 1e-3

var i = 0
var centers_change = Double.PositiveInfinity
var centers = data.takeSample(false, K)
while (i < 10 && centers_change > epsilon) {
  val old_centers = sc.broadcast(centers)
  centers = (data
    // For each point, find its closest center index.
    .map(point => (closestPoint(point, old_centers.value), (point, 1)))
    // Sum points and counts in each cluster.
    .reduceByKey({case ((p1, c1), (p2, c2)) => (p1 + p2, c1 + c2)})
    // Sort by cluster index.
    .sortByKey()
    // Compute the new centers by averaging corresponding points.
    .map({case (index, (sum, count)) => sum / count.toDouble})
    .collect())

  // Compute change in center positions.
  centers_change = (centers zip old_centers.value).map({case (a, b) => (a-b).norm(2)}).sum
  old_centers.unpersist()
  i += 1
}

print(centers.deep)
</file>