On the Python side, the Perl script is used in the following way:
<file python>
import json
import os

...

# let rdd be an RDD we want to process
# (the script name tokenize.pl below is an assumption; substitute your own Perl script)
rdd.map(json.dumps).pipe("perl tokenize.pl", os.environ).map(json.loads)
</file>
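
To make the data exchange concrete, the following standalone sketch shows the same JSON-lines protocol without Spark; ''cat'' merely echoes its input and stands in for the Perl script, which reads one JSON line per element from its stdin and writes one JSON line per result to its stdout:
<file python>
import json
import subprocess

# Each element becomes one JSON-encoded line on the child's stdin;
# every line the child writes to stdout is decoded back with json.loads.
elements = [["Hello", "world"], ["How", "are", "you"]]
child_stdin = "".join(json.dumps(element) + "\n" for element in elements)

# "cat" stands in for "perl tokenize.pl" here.
child = subprocess.run(["cat"], input=child_stdin, capture_output=True, text=True)
results = [json.loads(line) for line in child.stdout.splitlines()]

print(results)   # [['Hello', 'world'], ['How', 'are', 'you']]
</file>
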
==== Complete Example using Simple Perl Tokenizer and Python ====
Suppose we want to write a program which uses the Perl tokenizer and then produces token counts. The driver serializes the input lines to JSON, pipes them through the tokenizer, counts the resulting tokens, and finally calls ''sc.stop()''.
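
A sketch of such a driver is shown below; the script name ''tokenize.pl'', the command-line handling and the exact lambdas are assumptions rather than fixed choices:
<file python>
import json
import os
import sys

from pyspark import SparkContext

if len(sys.argv) < 3:
    sys.exit("Usage: %s input output" % sys.argv[0])
input_path, output_path = sys.argv[1], sys.argv[2]

sc = SparkContext()
(sc.textFile(input_path)
   # one JSON line per input line is sent to the Perl tokenizer, which
   # returns one JSON array of tokens per line
   .map(json.dumps).pipe("perl tokenize.pl", os.environ).map(json.loads)
   # count the tokens
   .flatMap(lambda tokens: [(token, 1) for token in tokens])
   .reduceByKey(lambda x, y: x + y)
   .saveAsTextFile(output_path))
sc.stop()
</file>
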
It can be executed using ''spark-submit''. Note that the Perl script has to be added to the list of files used by the job (via the ''--files'' option of ''spark-submit'').

===== Using Scala and JSON =====
The Perl side is the same as in the Python case described above.

The Scala side is a bit more complicated than the Python one, because in Scala the JSON serialization and deserialization has to be performed explicitly, for example using the ''json4s'' library:
<file scala>
def encodeJson[T <: AnyRef](src: T): String = {
  implicit val formats = org.json4s.jackson.Serialization.formats(org.json4s.NoTypeHints)
  return org.json4s.jackson.Serialization.write[T](src)
}

def decodeJson[T: Manifest](src: String): T = {
  implicit val formats = org.json4s.jackson.Serialization.formats(org.json4s.NoTypeHints)
  return org.json4s.jackson.Serialization.read[T](src)
}

...

// let rdd be an RDD we want to process; the script name tokenize.pl and the
// result type Array[String] below have to match the actual Perl script
rdd.map(encodeJson).pipe("perl tokenize.pl").map(decodeJson[Array[String]])
</file>

==== Complete Example using Simple Perl Tokenizer and Scala ====

We now implement the complete example (the Perl tokenizer with token counting) in Scala.

The Scala file looks as follows:
<file scala>
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

object Main {
  def encodeJson[T <: AnyRef](src: T): String = {
    implicit val formats = org.json4s.jackson.Serialization.formats(org.json4s.NoTypeHints)
    return org.json4s.jackson.Serialization.write[T](src)
  }

  def decodeJson[T: Manifest](src: String): T = {
    implicit val formats = org.json4s.jackson.Serialization.formats(org.json4s.NoTypeHints)
    return org.json4s.jackson.Serialization.read[T](src)
  }

  def main(args: Array[String]) {
    if (args.length < 2) sys.error("Usage: input output")
    val (input, output) = (args(0), args(1))

    val sc = new SparkContext()
    sc.textFile(input)
      .map(encodeJson).pipe("perl tokenize.pl").map(decodeJson[Array[String]])
      .flatMap(tokens => tokens.map((_, 1)))
      .reduceByKey(_+_)
      .saveAsTextFile(output)
    sc.stop()
  }
}
</file>

Note that we had to use ''decodeJson[Array[String]]'' with an explicit type parameter, because the result type of ''decodeJson'' cannot be inferred from the context.

After compiling, the application can be executed using
  spark-submit --files tokenize.pl target/... input output
where ''target/...'' stands for the path of the compiled jar; the Perl script is again passed to the job via ''--files''.