Example #1
        def getOrCreate(self):
            """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
            new one based on the options set in this builder.

            This method first checks whether there is a valid global default SparkSession, and if
            yes, return that one. If no valid global default SparkSession exists, the method
            creates a new SparkSession and assigns the newly created SparkSession as the global
            default.

            >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
            >>> s1.conf.get("k1") == "v1"
            True

            In case an existing SparkSession is returned, the config options specified
            in this builder will be applied to the existing SparkSession.

            >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
            >>> s1.conf.get("k1") == s2.conf.get("k1")
            True
            >>> s1.conf.get("k2") == s2.conf.get("k2")
            True
            """
            with self._lock:
                from pyspark.context import SparkContext
                from pyspark.conf import SparkConf
                session = SparkSession._instantiatedContext
                if session is None:
                    sparkConf = SparkConf()
                    for key, value in self._options.items():
                        sparkConf.set(key, value)
                    sc = SparkContext.getOrCreate(sparkConf)
                    session = SparkSession(sc)
                for key, value in self._options.items():
                    session.conf.set(key, value)
                return session
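
A minimal usage sketch of the builder flow documented above (the option keys "spark.custom.k1" / "spark.custom.k2" are hypothetical, used only for illustration):

from pyspark.sql import SparkSession

# First call creates the global default session with the given option.
spark = SparkSession.builder \
    .master("local[2]") \
    .appName("builder-demo") \
    .config("spark.custom.k1", "v1") \
    .getOrCreate()

# A later call returns that same session; new options are applied to it.
same_session = SparkSession.builder.config("spark.custom.k2", "v2").getOrCreate()
assert spark is same_session                      # same global default session
print(same_session.conf.get("spark.custom.k1"))   # v1
print(same_session.conf.get("spark.custom.k2"))   # v2

spark.stop()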
Example #2
    def _create_shell_session():
        """
        Initialize a SparkSession for a pyspark shell session. This is called from shell.py
        to make error handling simpler without needing to declare local variables in that
        script, which would expose those to users.
        """
        import py4j
        from pyspark.conf import SparkConf
        from pyspark.context import SparkContext
        try:
            # Try to access HiveConf, it will raise exception if Hive is not added
            conf = SparkConf()
            if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
                SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
                return SparkSession.builder\
                    .enableHiveSupport()\
                    .getOrCreate()
            else:
                return SparkSession.builder.getOrCreate()
        except (py4j.protocol.Py4JError, TypeError):
            if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
                warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
                              "please make sure you build spark with hive")

        return SparkSession.builder.getOrCreate()
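
A minimal, hedged sketch of the catalog fallback described above: requesting the in-memory catalog explicitly means getOrCreate() never needs HiveConf (assumes no SparkSession is already running in the process):

from pyspark.sql import SparkSession

spark = SparkSession.builder \
    .master("local[1]") \
    .appName("no-hive-demo") \
    .config("spark.sql.catalogImplementation", "in-memory") \
    .getOrCreate()

print(spark.conf.get("spark.sql.catalogImplementation"))  # in-memory
spark.stop()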
Example #3
 def getOrCreate(self):
     """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a new
     one based on the options set in this builder.
     """
     with self._lock:
         from pyspark.conf import SparkConf
         from pyspark.context import SparkContext
         from pyspark.sql.context import SQLContext
         sparkConf = SparkConf()
         for key, value in self._options.items():
             sparkConf.set(key, value)
         sparkContext = SparkContext.getOrCreate(sparkConf)
         return SQLContext.getOrCreate(sparkContext).sparkSession
Example #4
 def test_from_conf_with_settings(self):
     conf = SparkConf()
     conf.set("spark.cleaner.ttl", "10")
     conf.setMaster(self.master)
     conf.setAppName(self.appName)
     self.ssc = StreamingContext(conf=conf, duration=self.batachDuration)
     self.assertEqual(int(self.ssc.sparkContext._conf.get("spark.cleaner.ttl")), 10)
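
A standalone, hedged sketch of what this test asserts: a value set on a SparkConf is visible on the SparkContext conf backing a StreamingContext (assumes a local Spark installation with the DStream API available and no SparkContext already running):

from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.streaming import StreamingContext

conf = SparkConf().setMaster("local[2]").setAppName("ttl-demo")
conf.set("spark.cleaner.ttl", "10")

sc = SparkContext(conf=conf)
ssc = StreamingContext(sc, 1)  # 1-second batch duration

assert int(sc._conf.get("spark.cleaner.ttl")) == 10
ssc.stop(stopSparkContext=True, stopGraceFully=False)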
Example #5
  def __init__(self):
    # Setup PySpark. This is needed until PySpark becomes available on PyPI,
    # after which we can simply add it to requirements.txt.
    _setup_pyspark()
    from pyspark.conf import SparkConf
    from pyspark.context import SparkContext
    from pyspark.serializers import MarshalSerializer

    # Create a temporary .zip lib file for Metis, which will be copied over to
    # Spark workers so they can unpickle Metis functions and objects.
    metis_lib_file = tempfile.NamedTemporaryFile(suffix='.zip', delete=False)
    metis_lib_file.close()
    _copy_lib_for_spark_workers(metis_lib_file.name)

    # Also ship the Metis lib file so worker nodes can deserialize Metis
    # internal data structures.
    conf = SparkConf()
    conf.setMaster(app.config['SPARK_MASTER'])
    conf.setAppName('chronology:metis')
    parallelism = int(app.config.get('SPARK_PARALLELISM', 0))
    if parallelism:
      conf.set('spark.default.parallelism', parallelism)
    self.context = SparkContext(conf=conf,
                                pyFiles=[metis_lib_file.name],
                                serializer=MarshalSerializer())

    # Delete temporary Metis lib file.
    os.unlink(metis_lib_file.name)

    # We'll use this to parallelize fetching events in KronosSource.
    # The default of 8 is from:
    # https://spark.apache.org/docs/latest/configuration.html
    self.parallelism = parallelism or 8
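
A hedged, self-contained sketch of the "ship a .zip of local code to the workers" pattern used by this constructor; the packaged module name "mylib" is hypothetical:

import tempfile
import zipfile

from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.serializers import MarshalSerializer

# Build a small .zip dependency on the fly.
lib_zip = tempfile.NamedTemporaryFile(suffix=".zip", delete=False)
lib_zip.close()
with zipfile.ZipFile(lib_zip.name, "w") as zf:
    zf.writestr("mylib/__init__.py", "VALUE = 42\n")

conf = SparkConf().setMaster("local[2]").setAppName("pyfiles-demo")
sc = SparkContext(conf=conf, pyFiles=[lib_zip.name], serializer=MarshalSerializer())

# Workers can import the shipped module because the .zip lands on their sys.path.
print(sc.parallelize(range(2)).map(lambda _: __import__("mylib").VALUE).collect())
sc.stop()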
Example #6
        def getOrCreate(self):
            """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
            new one based on the options set in this builder.

            This method first checks whether there is a valid global default SparkSession, and if
            yes, return that one. If no valid global default SparkSession exists, the method
            creates a new SparkSession and assigns the newly created SparkSession as the global
            default.

            >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
            >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
            True

            In case an existing SparkSession is returned, the config options specified
            in this builder will be applied to the existing SparkSession.

            >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
            >>> s1.conf.get("k1") == s2.conf.get("k1")
            True
            >>> s1.conf.get("k2") == s2.conf.get("k2")
            True
            """
            with self._lock:
                from pyspark.context import SparkContext
                from pyspark.conf import SparkConf

                session = SparkSession._instantiatedContext
                if session is None:
                    sparkConf = SparkConf()
                    for key, value in self._options.items():
                        sparkConf.set(key, value)
                    sc = SparkContext.getOrCreate(sparkConf)
                    # This SparkContext may be an existing one.
                    for key, value in self._options.items():
                        # we need to propagate the confs
                        # before we create the SparkSession. Otherwise, confs like
                        # warehouse path and metastore url will not be set correctly (
                        # these confs cannot be changed once the SparkSession is created).
                        sc._conf.set(key, value)
                    session = SparkSession(sc)
                for key, value in self._options.items():
                    session.conf.set(key, value)
                for key, value in self._options.items():
                    session.sparkContext._conf.set(key, value)
                return session
Example #7
        def getOrCreate(self):
            """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
            new one based on the options set in this builder.

            This method first checks whether there is a valid thread-local SparkSession,
            and if yes, return that one. It then checks whether there is a valid global
            default SparkSession, and if yes, return that one. If no valid global default
            SparkSession exists, the method creates a new SparkSession and assigns the
            newly created SparkSession as the global default.

            In case an existing SparkSession is returned, the config options specified
            in this builder will be applied to the existing SparkSession.
            """
            with self._lock:
                from pyspark.conf import SparkConf
                from pyspark.context import SparkContext
                from pyspark.sql.context import SQLContext
                sparkConf = SparkConf()
                for key, value in self._options.items():
                    sparkConf.set(key, value)
                sparkContext = SparkContext.getOrCreate(sparkConf)
                return SQLContext.getOrCreate(sparkContext).sparkSession
Example #8
def main():
    # Parse the configuration from command-line arguments
    app_id = int(sys.argv[1])
    master = sys.argv[2]
    app_name = sys.argv[3]

    # Application configuration
    assert APP_CONFIG.get(app_id) is not None, \
        '[myapp streaming_app_main.main()] configuration error: invalid APP_CONFIG with app.id = ' + str(app_id)
    app_conf = map_conf_properties(APP_CONFIG.get(app_id), 'app.id')[app_id]
    spark_home = app_conf['sparkHome']
    pyFiles = app_conf['pyFiles.list']
    di_id = app_conf.get('app.interfaceId')

    # Data interface configuration
    di_in_conf_with_ds_conf = get_di_conf_with_ds_conf(
        di_id, DATAINTERFACE_CONFIG, DATASOURCE_CONFIG,
        di_key='interface.id', di_ds_key='interface.sourceId', ds_key='source.id', merge_key_name='interface.id'
    )[di_id]
    print('= = ' * 20, type(di_in_conf_with_ds_conf), 'di_in_conf_with_ds_conf = ')
    pprint(di_in_conf_with_ds_conf)

    schema_conf_string = di_in_conf_with_ds_conf['schema']
    struct_type = generate_df_schmea(schema_conf_string)
    # schema_field_list = [x.name for x in struct_type.fields]
    di_in_conf_with_ds_conf['struct.type'] = struct_type
    # di_in_conf_with_ds_conf['struct.field.list'] = schema_field_list

    di_out_confs = [kv for kv in DATAINTERFACE_CONFIG.iteritems() if kv[1].get('interface.type', '') == 'output']
    print('= = ' * 20, type(di_out_confs), 'di_out_confs = ')
    pprint(di_out_confs)

    di_out_confs_with_ds_conf = list_dict_merge(
        [get_di_conf_with_ds_conf(
            kv[0], DATAINTERFACE_CONFIG, DATASOURCE_CONFIG,
            di_key='interface.id', di_ds_key='interface.sourceId', ds_key='source.id', merge_key_name='interface.id')
         for kv in DATAINTERFACE_CONFIG.iteritems() if kv[1].get('interface.type', '') == 'output']
    )

    print('= = ' * 20, type(di_out_confs_with_ds_conf), 'di_out_confs_with_ds_conf = ')
    pprint(di_out_confs_with_ds_conf)

    # External cache configuration
    cache_confs_with_ds_conf = list_dict_merge(
        [get_di_conf_with_ds_conf(
            kv[0], CACHE_CONFIG, DATASOURCE_CONFIG,
            di_key='cache.id', di_ds_key='cache.sourceId', ds_key='source.id', merge_key_name='cache.id')
         for kv in CACHE_CONFIG.iteritems()]
    )
    print('= = ' * 20, type(cache_confs_with_ds_conf), 'cache_confs_with_ds_conf = ')
    pprint(cache_confs_with_ds_conf)

    # Configuration of the prepare stage for the specified input interface
    # Configuration of the enabled steps within the prepare-stage configuration
    # Note: when filtering a dict, the argument passed to the function is the dict's key
    prepares_config_active = PREPARES_CONFIG[di_id] \
        if PREPARES_CONFIG.get(di_id, {}).get('prepares.enabled', False) else {}
    # print('= = ' * 20, type(prepares_config_active), 'prepares_config_active = ')
    # pprint(prepares_config_active)

    # TODO: the results of the two approaches compared with == tested False; remove the commented-out code
    # prepares_config_active_steps = filter(
    # lambda step_conf: step_conf[1].get('step.enabled', False),
    #     map(lambda step_conf: (step_conf[0], map_conf_properties(step_conf[1])),
    #         prepares_config_active.get('steps', {}).iteritems()
    #     )
    # )
    prepares_config_active_steps = \
        [(k, map_conf_properties(v)) for k, v in prepares_config_active.get('steps', {}).iteritems()
         if v.get('step.enabled', False)]

    print('= = ' * 20, type(prepares_config_active_steps), 'prepares_config_active_steps = ')
    pprint(prepares_config_active_steps)

    # Configuration of the compute stage for the specified input interface
    # After filter this becomes a list whose elements are tuple(computeStatistics.id, computeStatistics.conf_dict)
    computes_config_active = COMPUTES_CONFIG[di_id] \
        if COMPUTES_CONFIG.get(di_id, {}).get('computeStatistics.enabled', False) else {}

    # list[{computeStatistic.id: {conf}}, ...]
    # # TODO: the results of the two approaches compared with == tested False; remove the commented-out code
    # compute_computeStatistics_config_active = filter(
    #     lambda computeStatistic_conf: computeStatistic_conf[1].get('computeStatistic.enabled', False),
    #     computes_config_active.get('computeStatistics', {}).iteritems())

    compute_computeStatistics_config_active = [
        kv for kv in computes_config_active.get('computeStatistics', {}).iteritems()
        if kv[1].get('computeStatistic.enabled', False)]
    print('= = ' * 20, type(compute_computeStatistics_config_active), 'compute_computeStatistics_config_active = ')
    pprint(compute_computeStatistics_config_active)

    # {computeStatistic.id -> list[step_conf_tuple]}, where step_conf_tuple = (step_id, step_conf_dict)
    compute_prepares_config_active = dict(map(
        lambda computeStatistic_conf: (computeStatistic_conf[0],
                                       sorted(list_dict_merge(
                                           map(lambda step_conf: map_conf_properties(step_conf[1], 'step.id'),
                                               filter(
                                                   lambda step_conf: step_conf[1].get('step.enabled', False),
                                                   computeStatistic_conf[1].get('prepares.steps', {}).iteritems())
                                           )).iteritems())
        ), compute_computeStatistics_config_active))
    # print('= = ' * 30, compute_prepares_config_active2 == compute_prepares_config_active)

    print('= = ' * 20, type(compute_prepares_config_active), 'compute_prepares_config_active = ')
    pprint(compute_prepares_config_active)

    compute_computes_config_active = dict(map(
        lambda computeStatistic_conf: (computeStatistic_conf[0],
                                       sorted(list_dict_merge(
                                           map(lambda step_conf: map_conf_properties(step_conf[1], 'step.id'),
                                               filter(lambda step_conf: step_conf[1].get('step.enabled', False),
                                                      computeStatistic_conf[1].get('computes.steps', {}).iteritems())
                                           )).iteritems())
        ), compute_computeStatistics_config_active))
    print('= = ' * 20, type(compute_computes_config_active), 'compute_computes_config_active = ')
    pprint(compute_computes_config_active)

    test_flag = False
    if not test_flag:
        # Initialization
        # serializer experiments:
        # default serializer is PickleSerializer()  # UnpicklingError: invalid load key, '{'.
        # serializer=MarshalSerializer()  # ValueError: bad marshal data
        # serializer=AutoSerializer()  # ValueError: invalid sevialization type: {
        # serializer=CompressedSerializer(PickleSerializer())  # error: Error -3 while decompressing data: incorrect header check

        # sc = SparkContext(master, app_name, sparkHome = spark_home, pyFiles=pyFiles)
        # sc = SparkContext(master, app_name, sparkHome = sparkHome, pyFiles=pyFiles, serializer=MarshalSerializer())
        # sc = SparkContext(master, app_name, sparkHome = sparkHome, pyFiles=pyFiles, serializer=AutoSerializer())
        # sc = SparkContext(master, app_name, sparkHome = sparkHome, pyFiles=pyFiles, serializer=CompressedSerializer(PickleSerializer()))

        spark_conf = SparkConf()
        spark_conf.setMaster(master).setAppName(app_name).setSparkHome(spark_home)

        # Spark Streaming tuning configuration
        spark_streaming_blockInterval = str(app_conf.get('spark.streaming.blockInterval', '')).strip()
        if spark_streaming_blockInterval:
            spark_conf.set('spark.streaming.blockInterval', spark_streaming_blockInterval)

        spark_streaming_kafka_maxRatePerPartition = str(
            app_conf.get('spark.streaming.kafka.maxRatePerPartition', '')).strip()
        if spark_streaming_kafka_maxRatePerPartition:
            spark_conf.set('spark.streaming.kafka.maxRatePerPartition', spark_streaming_kafka_maxRatePerPartition)

        spark_streaming_receiver_maxRate = str(app_conf.get('spark.streaming.receiver.maxRate', '')).strip()
        if spark_streaming_receiver_maxRate:
            spark_conf.set('spark.streaming.receiver.maxRate', spark_streaming_receiver_maxRate)

        spark_streaming_concurrentJobs = str(app_conf.get('spark.streaming.concurrentJobs', '')).strip()
        if spark_streaming_concurrentJobs:
            spark_conf.set('spark.streaming.concurrentJobs', spark_streaming_concurrentJobs)

        # Spark SQL tuning configuration
        spark_sql_shuffle_partitions = str(app_conf.get('spark.sql.shuffle.partitions', '')).strip()
        if spark_sql_shuffle_partitions:
            spark_conf.set('spark.sql.shuffle.partitions', spark_sql_shuffle_partitions)

        sc = SparkContext(conf=spark_conf)
        for path in (pyFiles or []):
            sc.addPyFile(path)

        # External cache optimization: distribute via broadcast
        cache_manager = CacheManager()
        cache_broadcast_list = \
            [(cache_id, cache_manager.cache_dataset(sc, cache_conf))
             for cache_id, cache_conf in cache_confs_with_ds_conf.iteritems()
             if cache_conf.get('broadcast.enabled', False)]

        for cache_id, cache_broadcast in cache_broadcast_list:
            cache_confs_with_ds_conf[cache_id]['broadcast'] = cache_broadcast

        batchDurationSeconds = app_conf['batchDuration.seconds']
        ssc = StreamingContext(sc, batchDurationSeconds)
        sqlc = SQLContext(sc)

        # Read the data source
        stream = StreamingReader.readSource(ssc, di_in_conf_with_ds_conf, app_conf)
        # Stream processing: 1) initialize the handler instance for the specified data interface from config,
        #                    2) call that handler instance's stream-processing method
        # Test: kafka_wordcount
        # counts = stream.flatMap(lambda line: line.split(" ")) \
        # .map(lambda word: (word, 1)) \
        # .reduceByKey(lambda a, b: a+b)
        # counts.pprint()
        StreamingApp.process(
            stream, sc, sqlc,
            di_in_conf_with_ds_conf, di_out_confs_with_ds_conf, cache_confs_with_ds_conf,
            prepares_config_active_steps, compute_prepares_config_active, compute_computes_config_active)

        ssc.start()
        ssc.awaitTermination()
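
The "read the value, strip it, set it only if non-empty" blocks above could be condensed into a small helper; this is an illustrative sketch only (the helper name apply_optional_confs is not part of the original code):

def apply_optional_confs(spark_conf, app_conf, keys):
    """Copy the listed keys from app_conf onto spark_conf, skipping blank values."""
    for key in keys:
        value = str(app_conf.get(key, '')).strip()
        if value:
            spark_conf.set(key, value)
    return spark_conf

# Possible usage with the tuning keys handled in main():
# apply_optional_confs(spark_conf, app_conf, [
#     'spark.streaming.blockInterval',
#     'spark.streaming.kafka.maxRatePerPartition',
#     'spark.streaming.receiver.maxRate',
#     'spark.streaming.concurrentJobs',
#     'spark.sql.shuffle.partitions',
# ])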
Example #9
  counts = [field[0] for field in new_values]
  ids = [field[1] for field in new_values]
  count, new_ids = 0, 0
  if last_sum:
    count = last_sum[0]
    new_ids = last_sum[1]

  return sum(counts) + count, sum(ids) + new_ids


def actualiza2(newcount, oldcount):
    if oldcount is None:
        oldcount = 0
    return sum(newcount, oldcount)


conf = SparkConf().setAppName("PySpark Cassandra Test").set("spark.cassandra.connection.host","localhost")
sc = CassandraSparkContext(conf=conf)
sc.setLogLevel("WARN")
def cassandraSend(values):
    #print(values)
    rdd = sc.parallelize([{"subreddit": values[0],"word": values[1],
    "count":values[3],"score":values[2]}])
    rdd.saveToCassandra("reddit","word_counter")

ssc = StreamingContext(sc, 1)
ssc.checkpoint("checkpoint")

dks = KafkaUtils.createDirectStream(ssc, ['Reddit'], {"bootstrap.servers": 'localhost:9092'})

#scores
scores = dks.map(lambda x: loads(x[1])) \
Example #10
    def _do_init(self, master, appName, sparkHome, pyFiles, environment,
                 batchSize, serializer, conf, jsc, profiler_cls):
        self.environment = environment or {}
        # java gateway must have been launched at this point.
        if conf is not None and conf._jconf is not None:
            # conf has been initialized in JVM properly, so use conf directly. This represents the
            # scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
            # created and then stopped, and we create a new SparkConf and new SparkContext again)
            self._conf = conf
        else:
            self._conf = SparkConf(_jvm=SparkContext._jvm)
            if conf is not None:
                for k, v in conf.getAll():
                    self._conf.set(k, v)

        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 0:
            self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.items():
                self._conf.setExecutorEnv(key, value)
        for key, value in DEFAULT_CONFIGS.items():
            self._conf.setIfMissing(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception(
                "An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)

        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v

        self.environment["PYTHONHASHSEED"] = os.environ.get(
            "PYTHONHASHSEED", "0")

        # Create the Java SparkContext through Py4J
        self._jsc = jsc or self._initialize_context(self._conf._jconf)
        # Reset the SparkConf to the one actually used by the SparkContext in JVM.
        self._conf = SparkConf(_jconf=self._jsc.sc().conf())

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        auth_token = self._gateway.gateway_parameters.auth_token
        self._accumulatorServer = accumulators._start_update_server(auth_token)
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jvm.PythonAccumulatorV2(
            host, port, auth_token)
        self._jsc.sc().register(self._javaAccumulator)

        # If encryption is enabled, we need to setup a server in the jvm to read broadcast
        # data via a socket.
        # scala's mangled names w/ $ in them require special treatment.
        self._encryption_enabled = self._jvm.PythonUtils.getEncryptionEnabled(
            self._jsc)

        self.pythonExec = self._jvm.scala.Option.apply(
            os.environ.get("PYSPARK_PYTHON"))
        self.pythonVer = "%d.%d" % sys.version_info[:2]

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = BroadcastPickleRegistry()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.insert(1, root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                try:
                    filepath = os.path.join(SparkFiles.getRootDirectory(),
                                            filename)
                    if not os.path.exists(filepath):
                        # In case of YARN with shell mode, 'spark.submit.pyFiles' files are
                        # not added via SparkContext.addFile. Here we check if the file exists,
                        # try to copy and then add it to the path. See SPARK-21945.
                        shutil.copyfile(path, filepath)
                    if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
                        self._python_includes.append(filename)
                        sys.path.insert(1, filepath)
                except Exception:
                    warnings.warn(
                        "Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
                        "Python path:\n  %s" % (path, "\n  ".join(sys.path)),
                        RuntimeWarning)

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(
            self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
                .getAbsolutePath()

        # profiling stats collected for each PythonRDD
        if self._conf.get("spark.python.profile", "false") == "true":
            dump_path = self._conf.get("spark.python.profile.dump", None)
            self.profiler_collector = ProfilerCollector(
                profiler_cls, dump_path)
        else:
            self.profiler_collector = None

        # create a signal handler which would be invoked on receiving SIGINT
        def signal_handler(signal, frame):
            self.cancelAllJobs()
            raise KeyboardInterrupt()

        # see http://stackoverflow.com/questions/23206787/
        if isinstance(threading.current_thread(), threading._MainThread):
            signal.signal(signal.SIGINT, signal_handler)
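
A hedged sketch of the precedence logic above: explicit set() wins, while setIfMissing() (used for DEFAULT_CONFIGS) only fills in keys that are still unset. This runs on a plain SparkConf without starting a SparkContext:

from pyspark.conf import SparkConf

conf = SparkConf()
conf.set("spark.app.name", "explicit-name")
conf.setIfMissing("spark.app.name", "default-name")  # ignored, already set
conf.setIfMissing("spark.master", "local[2]")        # applied, was missing

print(conf.get("spark.app.name"))  # explicit-name
print(conf.get("spark.master"))    # local[2]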
Example #11
jsc = intp.getJavaSparkContext()

if sparkVersion.isImportAllPackageUnderSparkSql():
    java_import(gateway.jvm, "org.apache.spark.sql.*")
    java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
else:
    java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
    java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
    java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
    java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")

java_import(gateway.jvm, "scala.Tuple2")

jconf = intp.getSparkConf()
conf = SparkConf(_jvm=gateway.jvm, _jconf=jconf)
sc = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
sqlc = SQLContext(sc, intp.getSQLContext())
sqlContext = sqlc

completion = PySparkCompletion(intp)
z = PyZeppelinContext(intp.getZeppelinContext())

while True:
    req = intp.getStatements()
    try:
        stmts = req.statements().split("\n")
        jobGroup = req.jobGroup()
        final_code = None

        for s in stmts:
Example #12
    def __init__(self,
                 master=None,
                 appName=None,
                 sparkHome=None,
                 pyFiles=None,
                 environment=None,
                 batchSize=1024,
                 serializer=PickleSerializer(),
                 conf=None,
                 gateway=None):
        """
        Create a new SparkContext. At least the master and app name should be set,
        either through the named parameters here or through C{conf}.

        @param master: Cluster URL to connect to
               (e.g. mesos://host:port, spark://host:port, local[4]).
        @param appName: A name for your job, to display on the cluster web UI.
        @param sparkHome: Location where Spark is installed on cluster nodes.
        @param pyFiles: Collection of .zip or .py files to send to the cluster
               and add to PYTHONPATH.  These can be paths on the local file
               system or HDFS, HTTP, HTTPS, or FTP URLs.
        @param environment: A dictionary of environment variables to set on
               worker nodes.
        @param batchSize: The number of Python objects represented as a single
               Java object.  Set 1 to disable batching or -1 to use an
               unlimited batch size.
        @param serializer: The serializer for RDDs.
        @param conf: A L{SparkConf} object setting Spark properties.
        @param gateway: Use an existing gateway and JVM, otherwise a new JVM
               will be instantiated.


        >>> from pyspark.context import SparkContext
        >>> sc = SparkContext('local', 'test')

        >>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        ValueError:...
        """
        if rdd._extract_concise_traceback() is not None:
            self._callsite = rdd._extract_concise_traceback()
        else:
            tempNamedTuple = namedtuple("Callsite", "function file linenum")
            self._callsite = tempNamedTuple(function=None,
                                            file=None,
                                            linenum=None)
        SparkContext._ensure_initialized(self, gateway=gateway)

        self.environment = environment or {}
        self._conf = conf or SparkConf(_jvm=self._jvm)
        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 1:
            self.serializer = self._unbatched_serializer
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.iteritems():
                self._conf.setExecutorEnv(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception(
                "An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)
        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v

        # Create the Java SparkContext through Py4J
        self._jsc = self._initialize_context(self._conf._jconf)

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        self._accumulatorServer = accumulators._start_update_server()
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jsc.accumulator(
            self._jvm.java.util.ArrayList(),
            self._jvm.PythonAccumulatorParam(host, port))

        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = set()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.append(root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                self._python_includes.append(filename)
                sys.path.append(path)
                if not dirname in sys.path:
                    sys.path.append(dirname)

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(
            self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir).getAbsolutePath()
Example #13
    # path for pyspark and py4j
    spark_pylib = os.path.join(spark_home, "python", "lib")
    py4jlib = [ziplib
               for ziplib in os.listdir(spark_pylib)
               if ziplib.startswith('py4j') and ziplib.endswith('.zip')][0]
    py4jlib = os.path.join(spark_pylib, py4jlib)
    sys.path.append(os.path.join(spark_home, "python"))
    sys.path.append(py4jlib)

addPysparkPath()

from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.storagelevel import StorageLevel

conf = SparkConf()
conf.setMaster('local[*]').setAppName('SparkLit test')
sc = SparkContext(conf=conf)
logger = sc._jvm.org.apache.log4j
logger.LogManager.getLogger("org"). setLevel( logger.Level.ERROR )
logger.LogManager.getLogger("akka").setLevel( logger.Level.ERROR )

import sparklit

suite = {}


def setUp():
    PATH = 'tests/grace_dubliners_james_joyce.txt'
    data = sc.textFile(PATH, 4)
    data.persist(StorageLevel.MEMORY_ONLY)
Example #14
import sys
from random import random
from operator import add

from pyspark.sql import SparkSession
from pyspark.conf import SparkConf


if __name__ == "__main__":
    """
        Usage: pi [partitions]
    """
    num_executors = 1
    executor_image = "mesosphere/kubeflow:1.0.1-0.5.0-spark-3.0.0-horovod-0.19.5-tensorflow-2.2.0"

    conf = SparkConf()

    conf.setAll([
        ("spark.executor.instances", num_executors),
        ("spark.kubernetes.container.image", executor_image)])

    # print(conf.getAll())

    spark = SparkSession\
        .builder\
        .appName("PythonPi") \
        .config(conf=conf) \
        .getOrCreate()

    #partitions = int(sys.argv[1]) if len(sys.argv) > 1 else 2
    partitions = 2
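
A hedged follow-up sketch (the name conf_check is illustrative): values passed to SparkConf.setAll() can be read back from the conf itself with getAll(), and options that reached the session are visible through its runtime config:

conf_check = SparkConf().setAll([
    ("spark.executor.instances", str(num_executors)),
    ("spark.kubernetes.container.image", executor_image)])
print(dict(conf_check.getAll()).get("spark.executor.instances"))
print(spark.conf.get("spark.kubernetes.container.image", "not set"))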
Example #15
import argparse
import json
import os
import sys

import pyspark
from pyspark.sql import SparkSession
from pyspark.conf import SparkConf
from pyspark.sql.functions import col

## get spark context with minimal logging
spark = SparkSession.builder\
                    .config(conf=SparkConf())\
                    .enableHiveSupport()\
                    .getOrCreate()
spark.sparkContext.setLogLevel('WARN')

## mode ID is only command-line argument
## store as variable model_id and assert
## that path models/model_id exists,
## then change to that directory
parser = argparse.ArgumentParser()
parser.add_argument('model_id', help='model ID/directory name')
args = parser.parse_args()
model_id = args.model_id
assert os.path.exists('models/{}'.format(model_id))
os.chdir('models/{}'.format(model_id))

## load model and plots configuration files
model_dict = json.load(open('model.json', 'r'))
plots_dict = json.load(open('plots.json', 'r'))
Example #16
# coding=utf-8
from pprint import pprint
from pyspark import SparkContext
from pyspark.conf import SparkConf
from pyspark.sql import SQLContext
import sys
import time

# Start measuring execution time
startTime = time.time()

# Spark requires a configuration so that the application is run on the cluster
conf = SparkConf().setAppName("MoviesReccomandations baseline")
# When a Spark application runs, a Spark Context is started that lets operations
# be executed inside the executors. sc will hold the context of our Spark application
sc = SparkContext(conf=conf)
# Show errors only
sc.setLogLevel("ERROR")
# Open an SQL context to run queries through the Spark application
sqlContext = SQLContext(sc)

# Record the execution start time
startTime = time.time()

# Number of partitions to split the RDD into. sys lets us read the parameter given in the "Run Baseline" step
n_partitions = int(sys.argv[1])

# Create the RDD from the csv file at the S3 path, also specifying the number of partitions
# the RDD will be split into
ratings_data_temp = sc.textFile("s3://bucketbigdataemr/files/ratings.csv",
                                n_partitions)
Example #17
        default=10)

    classpath = os.popen(os.environ["HADOOP_HOME"] +
                         "/bin/hadoop classpath --glob").read()
    args = parser.parse_args()

    spark_executor_instances = args.num_executor
    spark_cores_max = spark_executor_instances * args.spark_executor_cores

    conf = SparkConf() \
      .setAppName("triplet_distributed_train") \
      .set("spark.eventLog.enabled", "false") \
      .set("spark.dynamicAllocation.enabled", "false") \
      .set("spark.shuffle.service.enabled", "false") \
      .set("spark.executor.cores", str(args.spark_executor_cores)) \
      .set("spark.cores.max", str(spark_cores_max)) \
      .set("spark.task.cpus", str(args.spark_executor_cores)) \
      .set("spark.executor.instances", str(args.num_executor)) \
      .setExecutorEnv("JAVA_HOME", os.environ["JAVA_HOME"]) \
      .setExecutorEnv("HADOOP_HDFS_HOME", os.environ["HADOOP_HOME"]) \
      .setExecutorEnv("LD_LIBRARY_PATH", os.environ["JAVA_HOME"] + "/jre/lib/amd64/server:" + os.environ["HADOOP_HOME"] + "/lib/native:" + "/usr/local/cuda-8.0/lib64" ) \
      .setExecutorEnv("CLASSPATH", classpath) \
      .set("hostbalance_shuffle","true")

    print("{0} ===== Start".format(datetime.now().isoformat()))
    sc = SparkContext(conf=conf)
    # sc.setLogLevel("DEBUG")
    num_executors = int(args.num_executor)
    num_ps = 1

    cluster = TFCluster.run(sc, main_fun, args, num_executors, num_ps,
                            args.tensorboard, TFCluster.InputMode.TENSORFLOW)
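
A hedged, self-contained sketch of the setExecutorEnv() calls above: each one is stored on the conf under the "spark.executorEnv." prefix and can be listed back out with getAll() (env_conf and the JAVA_HOME path are illustrative):

from pyspark.conf import SparkConf

env_conf = SparkConf().setAppName("executor-env-demo") \
    .setExecutorEnv("JAVA_HOME", "/usr/lib/jvm/java")
for key, value in env_conf.getAll():
    if key.startswith("spark.executorEnv."):
        print(key[len("spark.executorEnv."):], "=", value)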
Example #18
  l = sorted(l, key=lambda x: -x[1])
  return l[0:k]

def mergeWords(words_count,K=100,num_partitions=10):
  agg=words_count.map(lambda x:(x[0][1],(x[0][0],x[1])))\
      .groupByKey()\
      .map(lambda x:(x[0],firstK(x[1],K)))
  return agg

#draw word_cloud


if __name__== "__main__":
  ST="8/25/2017 14"
  ET="8/29/2017 14"
  conf = SparkConf()
  conf.setMaster("local[8]").setAppName("YELP")
  sc = SparkContext(conf=conf)
  log4j = sc._jvm.org.apache.log4j
  log4j.LogManager.getRootLogger().setLevel(log4j.Level.ERROR)
  print("Set log level to Error")
  num_partitions=10
  sqlContext=SQLContext(sc)
  #tweets=readAllTweets(sqlContext,"small.csv",num_partitions)
  tweets=readAllTweets(sqlContext,"Harvey_tweets.csv",num_partitions)
  print("finished reading ",tweets.count())
#364255
  print(tweets.top(10))
  #remove words
  from nltk.corpus import stopwords
  to_remove=stopwords.words('english')
Example #19
  with strategy.scope():
    multi_worker_model = build_and_compile_cnn_model()
  multi_worker_model.fit(x=train_datasets, epochs=args.epochs, steps_per_epoch=args.steps_per_epoch, callbacks=callbacks)

  from tensorflow_estimator.python.estimator.export import export_lib
  export_dir = export_lib.get_timestamped_export_dir(args.export_dir)
  compat.export_saved_model(multi_worker_model, export_dir, ctx.job_name == 'chief')


if __name__ == '__main__':
  import argparse
  from pyspark.context import SparkContext
  from pyspark.conf import SparkConf
  from tensorflowonspark import TFCluster

  sc = SparkContext(conf=SparkConf().setAppName("mnist_keras"))
  executors = sc._conf.get("spark.executor.instances")
  num_executors = int(executors) if executors is not None else 1

  parser = argparse.ArgumentParser()
  parser.add_argument("--batch_size", help="number of records per batch", type=int, default=64)
  parser.add_argument("--buffer_size", help="size of shuffle buffer", type=int, default=10000)
  parser.add_argument("--cluster_size", help="number of nodes in the cluster", type=int, default=num_executors)
  parser.add_argument("--epochs", help="number of epochs", type=int, default=3)
  parser.add_argument("--model_dir", help="path to save model/checkpoint", default="mnist_model")
  parser.add_argument("--export_dir", help="path to export saved_model", default="mnist_export")
  parser.add_argument("--steps_per_epoch", help="number of steps per epoch", type=int, default=469)
  parser.add_argument("--tensorboard", help="launch tensorboard process", action="store_true")

  args = parser.parse_args()
  print("args:", args)
Example #20
def run(broker_url=BROKER_URL,
        topic_redis="redis-events",
        topic_customer="customer-events",
        topic_output="intelligence-board",
        output_source="kafka",
        timeout=60):
    """process and merge multiple streams
    Args:
        broker_url(str): url of a kafka broker
        topic_redis(str): name of the topic with the in-memory stored data
        topic_customer(str): name of the topic with customer data
        topic_output(str): name of the topic with the processed stream
        output_source(str): name of the output source
                            "kafka": print the stream in the topic_output
                            "console": print the stream as a log
        timeout(int): seconds to wait before closing the stream

    to deploy this script in a standalone containerised cluster run:
    ```
        docker exec -it spark-streaming-etl-template \
        /opt/bitnami/spark/bin/spark-submit \
        --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.0 \
        /home/workspace/spark_streaming_etl_template/scripts/stream_processing.py \
        | tee ../../spark/logs/stream_processing.log
    ```
    """
    # create spark application
    conf = SparkConf().setMaster("spark://localhost:7077") \
        .setAppName('consume-process-produce')
    spark = SparkSession.builder.config(conf=conf).getOrCreate()
    spark.sparkContext.setLogLevel('WARN')

    # process streams
    customerriskstreamingdf = process_redis_server(spark, broker_url,
                                                   topic_redis)
    emailAndBirthYearStreamingDF = process_customer_events(
        spark, broker_url, topic_customer)
    # merge the stream
    df_merge = customerriskstreamingdf. \
        join(emailAndBirthYearStreamingDF, expr("""
        email = customer
        """))

    # output processed batch
    if output_source == "kafka":
        stream = df_merge \
            .selectExpr("cast(customer as string) as key",
                        "to_json(struct(*)) as value") \
            .writeStream.format("kafka") \
            .option("kafka.bootstrap.servers", "localhost:9092") \
            .option("topic", topic_output) \
            .option("checkpointLocation", "/tmp/kafka_checkpoint_") \
            .start()
        stream.awaitTermination(timeout)
        stream.stop()

    elif output_source == "console":
        stream = df_merge \
            .selectExpr("cast(customer as string) as key",
                        "to_json(struct(*)) as value") \
            .writeStream.format("console") \
            .start()
        stream.awaitTermination(timeout)
        stream.stop()
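
A hedged usage sketch for run(): write the merged stream to the console for one minute, relying on the module-level defaults (BROKER_URL and the topic names) defined elsewhere in this script:

if __name__ == "__main__":
    # Illustrative invocation only: console output instead of Kafka, 60 s timeout.
    run(output_source="console", timeout=60)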
Example #21
 def printSparkConfigurations():
     c = SparkConf()
     print("Spark configurations: {}".format(c.getAll()))
Example #22
 # NOTE: this snippet begins mid-list; the opening of the directory list and any
 # earlier entries were lost in extraction.
 directory = [
     '/home/dan/Desktop/IMN432-CW01/TF_IDF/',
     '/home/dan/Desktop/IMN432-CW01/Word_Freq/',
     '/home/dan/Desktop/IMN432-CW01/IDF/',
     '/home/dan/Desktop/IMN432-CW01/IDF/IDF-Pairs',
     '/home/dan/Desktop/IMN432-CW01/IDF',
     '/home/dan/Desktop/IMN432-CW01/TF_IDF/TF_IDF_File',
     '/home/dan/Desktop/IMN432-CW01/processXML/',
     '/home/dan/Desktop/IMN432-CW01/meta/',
     '/home/dan/Desktop/IMN432-CW01/TF_IDF',
     '/home/dan/Desktop/IMN432-CW01/processXML/Subject',
     '/home/dan/Spark_Files/Books/stopwords_en.txt']
 allFiles = getFileList(directory[0])
 # Find the Number of Files in the Directory
 numFiles = len(allFiles)
 # Create Spark Job Name and Configuration Settings
 config = SparkConf().setMaster("local[*]")
 config.set("spark.executor.memory", "5g")
 sc = SparkContext(conf=config, appName="ACKF415-Coursework-1")
 # Create a File Details List
 N = numFiles
 fileEbook = []
 print('################################################')
 print('###### Process Files > Word Freq to Pickle #####\n')
 # Start Timer
 WordFreq_Time = time()
 # Pickled Word Frequencies
 pickleWordF = getFileList(directory[2])
 # Ascertain if Section has already been completed
 if len(pickleWordF) < 1:
     print('Creating Word Freq Pickles and RDDs \n')
     # Import the Stop and Save as a List
Example #23
            variables_to_restore = variable_averages.variables_to_restore()
            saver = tf.train.Saver(variables_to_restore)

            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.summary.merge_all()

            summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

            while True:
                eval_once(saver, summary_writer, top_k_op, summary_op)
                if FLAGS.run_once:
                    break
                time.sleep(FLAGS.eval_interval_secs)

    #cifar10.maybe_download_and_extract()
    if tf.gfile.Exists(FLAGS.eval_dir):
        tf.gfile.DeleteRecursively(FLAGS.eval_dir)
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    evaluate()


if __name__ == '__main__':
    sc = SparkContext(conf=SparkConf().setAppName("cifar10_eval"))
    num_executors = int(sc._conf.get("spark.executor.instances"))
    num_ps = 0

    cluster = TFCluster.reserve(sc, num_executors, num_ps, False,
                                TFCluster.InputMode.TENSORFLOW)
    cluster.start(main_fun, sys.argv)
    cluster.shutdown()
Example #24
    def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
                 conf, jsc, profiler_cls):
        self.environment = environment or {}
        # java gateway must have been launched at this point.
        if conf is not None and conf._jconf is not None:
            # conf has been initialized in JVM properly, so use conf directly. This represents the
            # scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
            # created and then stopped, and we create a new SparkConf and new SparkContext again)
            self._conf = conf
        else:
            self._conf = SparkConf(_jvm=SparkContext._jvm)
            if conf is not None:
                for k, v in conf.getAll():
                    self._conf.set(k, v)

        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 0:
            self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.items():
                self._conf.setExecutorEnv(key, value)
        for key, value in DEFAULT_CONFIGS.items():
            self._conf.setIfMissing(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception("An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)

        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v

        self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")

        # Create the Java SparkContext through Py4J
        self._jsc = jsc or self._initialize_context(self._conf._jconf)
        # Reset the SparkConf to the one actually used by the SparkContext in JVM.
        self._conf = SparkConf(_jconf=self._jsc.sc().conf())

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        self._accumulatorServer = accumulators._start_update_server()
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port)
        self._jsc.sc().register(self._javaAccumulator)

        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
        self.pythonVer = "%d.%d" % sys.version_info[:2]

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = BroadcastPickleRegistry()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.insert(1, root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
                    self._python_includes.append(filename)
                    sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
                .getAbsolutePath()

        # profiling stats collected for each PythonRDD
        if self._conf.get("spark.python.profile", "false") == "true":
            dump_path = self._conf.get("spark.python.profile.dump", None)
            self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
        else:
            self.profiler_collector = None

        # create a signal handler which would be invoked on receiving SIGINT
        def signal_handler(signal, frame):
            self.cancelAllJobs()
            raise KeyboardInterrupt()

        # see http://stackoverflow.com/questions/23206787/
        if isinstance(threading.current_thread(), threading._MainThread):
            signal.signal(signal.SIGINT, signal_handler)
Example #25
                master=server.target,
                is_chief=(FLAGS.task == 0),
                init_fn=_get_init_fn(),
                summary_op=summary_op,
                number_of_steps=FLAGS.max_number_of_steps,
                log_every_n_steps=FLAGS.log_every_n_steps,
                save_summaries_secs=FLAGS.save_summaries_secs,
                save_interval_secs=FLAGS.save_interval_secs,
                summary_writer=summary_writer,
                sync_optimizer=optimizer if FLAGS.sync_replicas else None)


if __name__ == '__main__':
    import argparse

    sc = SparkContext(conf=SparkConf().setAppName("train_image_classifier"))
    executors = sc._conf.get("spark.executor.instances")
    num_executors = int(executors) if executors is not None else 1

    parser = argparse.ArgumentParser()
    parser.add_argument("--num_ps_tasks",
                        help="number of PS nodes",
                        type=int,
                        default=0)
    parser.add_argument("--tensorboard",
                        help="launch tensorboard process",
                        action="store_true")
    parser.add_argument("--cluster_size",
                        help="number of nodes in the cluster",
                        type=int,
                        default=num_executors)
    def als(self):
        """takes in user ratings from 2019 and returns predictions based on Spark's ALS model"""
        def get_db_properties():
            """loads postgresql information from config file"""
            db_properties = {}
            config = configparser.ConfigParser()
            config.read("db_properties.ini")
            db_prop = config["postgresql"]
            db_properties["user"] = db_prop["user"]
            db_properties["password"] = db_prop["password"]
            db_properties["url"] = db_prop["url"]
            db_properties["driver"] = db_prop["driver"]
            return db_properties

        def get_user_ratings(self):
            """returns list of ratings from landing page and list of seen movies by user"""
            movies_user_has_seen = []
            for i, user_movie_id in enumerate(self.user_input_ids):
                if f"seen{i}" in self.user_input.keys():
                    movies_user_has_seen.append(user_movie_id)
                    current_rating = int(self.user_input[f"rating{i}"])
                    self.user_ratings[i] = current_rating / 10
            return movies_user_has_seen

        def get_new_rtrue(self, db_ratings_2019):
            """Appends ratings from landing page to ratings table"""
            new_user_ratings = [(0, self.user_input_ids[i],
                                 self.user_ratings[i])
                                for i in range(len(self.user_input_ids))]
            new_user_df = spark.createDataFrame(
                new_user_ratings, ["userId", "movieId", "rating"])
            new_rtrue = db_ratings_2019.union(new_user_df)
            return new_rtrue

        def get_recommendations_for_new_user(model, num_recommendations=500):
            """determine recommendations for selected user"""
            new_user = spark.createDataFrame([(0, )], ["userId"])
            user_subset_recs = model.recommendForUserSubset(
                new_user, num_recommendations)
            result = user_subset_recs.collect()
            row = result[0]
            recommended_movies = []
            for i in range(num_recommendations):
                recommended_movies.append(
                    row.asDict()["recommendations"][i]["movieId"])
            return recommended_movies

        def get_relevant_genre(user_movies, movies):
            """find most relevant genre for new user"""
            high_rated = []
            for (key, value) in user_movies.items():
                if value > 3.5:
                    high_rated.append(key)
            user_genres = [
                row.genres for row in movies.filter(
                    movies.movieId.isin(high_rated)).collect()
            ]
            words = re.findall(r"[a-zA-Z'-]+", " ".join(user_genres))
            words = sorted(words)
            important_genre = Counter(words).most_common(1)
            try:
                top_genre = important_genre[0][0]
            except IndexError:
                top_genre = "(no genres listed)"
            return top_genre

        def filter_recommendations(recommended_movies, movies_ratings_2019):
            """filter recommendations by genre and average rating, return dict with top 10 recommendations"""
            filtered_recommendations = (movies_ratings_2019.filter(
                movies_ratings_2019.movieId.isin(recommended_movies)).filter(
                    movies_ratings_2019.genres.contains(top_genre)).filter(
                        movies_ratings_2019.avg_rating > 3.5).sort(
                            desc("total_ratings")).limit(10))
            filtered_recommended_movies = {
                row.movieId: row.title
                for row in filtered_recommendations.collect()
            }
            return filtered_recommended_movies

        def output_shape(filtered_recs, movies_user_has_seen, num_recs=3):
            """reduce number of recommendations, avoid movies user has seen and return as dictionary"""
            counter = 0
            recommendations = {}
            for key, value in filtered_recs.items():
                if counter >= num_recs:
                    break
                if key not in movies_user_has_seen:
                    print(value)
                    recommendations[int(key)] = {"title": value}
                    counter += 1
            return recommendations

        # Set up Spark
        conf = SparkConf()
        conf.set(
            "spark.jars",
            "../data/jars/postgresql-42.2.16.jar",
        )
        spark = (SparkSession.builder.appName("Spark_Recommender").config(
            conf=conf).getOrCreate())

        # Load the data from PostgreSQL RDS
        db_properties = get_db_properties()
        db_ratings_2019 = spark.read.jdbc(url=db_properties["url"],
                                          table="filtered_ratings_2019",
                                          properties=db_properties)
        db_ratings_2019 = db_ratings_2019.select(
            col("user_id").alias("userId"),
            col("movie_id").alias("movieId"),
            col("rating"),
        )
        movies = spark.read.jdbc(url=db_properties["url"],
                                 table="movies",
                                 properties=db_properties)
        movies = movies.select(
            col("movie_id").alias("movieId"), col("title"), col("genres"))
        movies_ratings_2019 = spark.read.jdbc(
            url=db_properties["url"],
            table="movies_ratings_2019",
            properties=db_properties,
        )
        movies_ratings_2019 = movies_ratings_2019.select(
            col("movie_id").alias("movieId"),
            col("title"),
            col("genres"),
            col("avg_rating"),
            col("total_ratings"),
        )

        # Prepare ratings dataframe
        movies_user_has_seen = get_user_ratings(self)
        user_movies = dict(zip(self.user_input_ids, self.user_ratings))
        new_rtrue = get_new_rtrue(self, db_ratings_2019)

        # Run the model
        als = ALS(
            rank=20,
            maxIter=15,
            regParam=0.01,
            # implicitPrefs=True,
            userCol="userId",
            itemCol="movieId",
            ratingCol="rating",
            coldStartStrategy="drop",
        )
        model = als.fit(new_rtrue)

        # Filter and reshape recommendations
        recommended_movies = get_recommendations_for_new_user(model)
        top_genre = get_relevant_genre(user_movies, movies)
        filtered_recommended_movies = filter_recommendations(
            recommended_movies, movies_ratings_2019)
        recommendations = output_shape(filtered_recommended_movies,
                                       movies_user_has_seen)

        return recommendations
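
# A minimal, self-contained sketch of the fit-and-recommend step used above,
# run on a toy ratings DataFrame instead of the PostgreSQL tables. The app
# name and the toy data are illustrative assumptions, not part of the
# original pipeline.
from pyspark.sql import SparkSession
from pyspark.ml.recommendation import ALS

spark = SparkSession.builder.appName("als_sketch").getOrCreate()
toy_ratings = spark.createDataFrame(
    [(0, 1, 4.0), (0, 2, 1.0), (1, 1, 5.0), (1, 3, 4.0), (2, 2, 5.0)],
    ["userId", "movieId", "rating"])
als_sketch = ALS(rank=5, maxIter=5, regParam=0.01,
                 userCol="userId", itemCol="movieId", ratingCol="rating",
                 coldStartStrategy="drop")
model = als_sketch.fit(toy_ratings)
# top-3 recommendations for user 0, mirroring get_recommendations_for_new_user
user_subset = spark.createDataFrame([(0,)], ["userId"])
for row in model.recommendForUserSubset(user_subset, 3).collect():
    print(row.userId, [r.movieId for r in row.recommendations])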
Exemplo n.º 27
0
import sys
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType, StructField, StructType
from pyspark.conf import SparkConf
from src.service import kdd
from src.service import extraction


def readDataFromHive(spark, input_table):
    sql = "select defectNo as name,summary as describe,content as detail from " + input_table + ""
    return spark.sql(sql).fillna('')


if __name__ == '__main__':
    input_table = "wufan.es_idms_defect_v4"
    conf = SparkConf()
    cluster_warehouse = 'warehouse_dir'
    local_warehouse = 'file:\\C:\\Users\\wys3160\\software\\tmp\\hive2'
    conf.set("spark.sql.warehouse.dir", cluster_warehouse)
    conf.set("spark.debug.maxToStringFields", 1000)
    # conf.set("spark.sql.parquet.binaryAsString","true")
    # config("spark.sql.shuffle.partitions","180")
    spark = SparkSession.builder.appName("test") \
        .config('spark.executor.extraJavaOptions', '-Dfile.encoding=utf-8') \
        .config('spark.driver.extraJavaOptions', '-Dfile.encoding=utf-8')\
        .config("spark.sql.shuffle.partitions","220")\
        .config("spark.sql.parquet.enableVectorizedReader", "false") \
        .enableHiveSupport().getOrCreate()
    # spark=SparkSession.builder.appName("knowledge").enableHiveSupport().getOrCreate()
    data = readDataFromHive(spark, input_table)
    if data.count() == 0:
Exemplo n.º 28
0
from __future__ import print_function

from pyspark.context import SparkContext
from pyspark.conf import SparkConf
from pyspark.sql import SparkSession

import argparse
import sys
import tensorflow as tf
from datetime import datetime

from tensorflowonspark import TFCluster, dfutil
from tensorflowonspark.pipeline import TFEstimator, TFModel
import mnist_dist_pipeline

sc = SparkContext(conf=SparkConf().setAppName("mnist_tf"))
spark = SparkSession(sc)

executors = sc._conf.get("spark.executor.instances")
num_executors = int(executors) if executors is not None else 1
num_ps = 1

parser = argparse.ArgumentParser()

# TFoS/cluster
parser.add_argument("--batch_size",
                    help="number of records per batch",
                    type=int,
                    default=100)
parser.add_argument("--epochs", help="number of epochs", type=int, default=1)
parser.add_argument("--model_dir",
Exemplo n.º 29
0

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from pyspark.context import SparkContext
from pyspark.conf import SparkConf

import argparse
import numpy
from datetime import datetime

from tensorflowonspark import TFCluster

sc = SparkContext(conf=SparkConf().setAppName("parker_spark"))
executors = sc._conf.get("spark.executor.instances")
num_executors = int(executors) if executors is not None else 1
num_ps = 1

parser = argparse.ArgumentParser()
'''
parser.add_argument("--batch_size", help="number of records per batch", type=int, default=100)
parser.add_argument("--epochs", help="number of epochs", type=int, default=1)
parser.add_argument("--format", help="example format: (csv|pickle|tfr)", choices=["csv", "pickle", "tfr"], default="csv")
parser.add_argument("--images", help="HDFS path to MNIST images in parallelized format")
parser.add_argument("--labels", help="HDFS path to MNIST labels in parallelized format")
parser.add_argument("--model", help="HDFS path to save/load model during train/inference", default="mnist_model")
parser.add_argument("--cluster_size", help="number of nodes in the cluster", type=int, default=num_executors)
parser.add_argument("--output", help="HDFS path to save test/inference output", default="predictions")
parser.add_argument("--readers", help="number of reader/enqueue threads", type=int, default=1)
Exemplo n.º 30
0
 def getConf(self):
     conf = SparkConf()
     conf.setAll(self._conf.getAll())
     return conf
Exemplo n.º 31
0
 def test_existing_spark_context_with_settings(self):
     conf = SparkConf()
     conf.set("spark.cleaner.ttl", "10")
     self.sc = SparkContext(master=self.master, appName=self.appName, conf=conf)
     self.ssc = StreamingContext(sparkContext=self.sc, duration=self.batachDuration)
     self.assertEqual(int(self.ssc.sparkContext._conf.get("spark.cleaner.ttl")), 10)
Exemplo n.º 32
0
    def _do_init(self, master, appName, sparkHome, pyFiles, environment,
                 batchSize, serializer, conf, jsc, profiler_cls):
        self.environment = environment or {}
        self._conf = conf or SparkConf(_jvm=self._jvm)
        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 0:
            self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.items():
                self._conf.setExecutorEnv(key, value)
        for key, value in DEFAULT_CONFIGS.items():
            self._conf.setIfMissing(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception(
                "An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)

        # Let YARN know it's a pyspark app, so it distributes needed libraries.
        if self.master == "yarn-client":
            self._conf.set("spark.yarn.isPython", "true")

        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v
        if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
            # disable randomness of hash of string in worker, if this is not
            # launched by spark-submit
            self.environment["PYTHONHASHSEED"] = "0"

        # Create the Java SparkContext through Py4J
        self._jsc = jsc or self._initialize_context(self._conf._jconf)

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        self._accumulatorServer = accumulators._start_update_server()
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jsc.accumulator(
            self._jvm.java.util.ArrayList(),
            self._jvm.PythonAccumulatorParam(host, port))

        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
        self.pythonVer = "%d.%d" % sys.version_info[:2]

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = set()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.insert(1, root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
                    self._python_includes.append(filename)
                    sys.path.insert(
                        1, os.path.join(SparkFiles.getRootDirectory(),
                                        filename))

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(
            self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
                .getAbsolutePath()

        # profiling stats collected for each PythonRDD
        if self._conf.get("spark.python.profile", "false") == "true":
            dump_path = self._conf.get("spark.python.profile.dump", None)
            self.profiler_collector = ProfilerCollector(
                profiler_cls, dump_path)
        else:
            self.profiler_collector = None

        # create a signal handler which would be invoked on receiving SIGINT
        def signal_handler(signal, frame):
            self.cancelAllJobs()
            raise KeyboardInterrupt()

        # see http://stackoverflow.com/questions/23206787/
        if isinstance(threading.current_thread(), threading._MainThread):
            signal.signal(signal.SIGINT, signal_handler)
Exemplo n.º 33
0
                    sec_per_batch = float(duration)

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), self._step, loss_value,
                                        examples_per_sec, sec_per_batch))

        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=FLAGS.train_dir,
                hooks=[
                    tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
                    tf.train.NanTensorHook(loss),
                    _LoggerHook()
                ],
                config=tf.ConfigProto(log_device_placement=FLAGS.
                                      log_device_placement)) as mon_sess:
            while not mon_sess.should_stop():
                mon_sess.run(train_op)


if __name__ == '__main__':
    sc = SparkContext(conf=SparkConf().setAppName("cifar10_train"))
    num_executors = int(sc._conf.get("spark.executor.instances"))
    num_ps = 0

    cluster = TFCluster.reserve(sc, num_executors, num_ps, False,
                                TFCluster.InputMode.TENSORFLOW)
    cluster.start(main_fun, sys.argv)
    cluster.shutdown()
Exemplo n.º 34
0
def get_spark_session():
    conf = SparkConf()
    conf.setMaster('yarn-client')
    conf.set("spark.yarn.am.cores", 7)
    conf.set("spark.executor.memory", "40g")
    conf.set("spark.executor.instances", 30)
    conf.set("spark.executor.cores", 8)
    conf.set("spark.python.worker.memory", "2g")
    conf.set("spark.default.parallelism", 2000)
    conf.set("spark.sql.shuffle.partitions", 2000)
    conf.set("spark.broadcast.blockSize", 1024)
    conf.set("spark.shuffle.file.buffer", '512k')
    conf.set("spark.speculation", True)
    conf.set("spark.speculation.quantile", 0.98)

    spark = SparkSession \
        .builder \
        .appName("hgongjing2_hdfs_to_hbase") \
        .config(conf = conf) \
        .enableHiveSupport() \
        .getOrCreate()

    return spark
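
# A hedged usage sketch for the helper above; the database and table names
# are hypothetical.
spark = get_spark_session()
spark.sql("select * from some_db.some_table limit 10").show()  # hypothetical table
spark.stop()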
Exemplo n.º 35
0
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.feature import StandardScaler
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.recommendation import Rating, ALS, MatrixFactorizationModel
from pyspark.mllib.evaluation import MulticlassMetrics
from time import time
import pandas as pd

sc = SparkContext(conf=SparkConf().setAppName("MovieLensRec"))

path = "file:/Users/dawang/Documents/GitHub/computational-advertising-note/spark"
print("数据路径", path)
rawData = sc.textFile(path + "/data/covertype/covtype.data")
print("取出前两个")
print(rawData.take(2))
print("清理数据")
lines = rawData.map(lambda x: x.split(","))
print("共计", lines.count(), "项数据")

import numpy as np


def extract_features(record, featureEnd):
    numericalFeatures = [
        convert_float(field) for field in record[0:featureEnd]
    ]
    return numericalFeatures
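

# extract_features above relies on a convert_float helper that is not shown
# in this excerpt; a plausible minimal version (an assumption based on how
# the covtype fields are used) would be:
def convert_float(field):
    # assumption: treat missing-value markers as 0.0, otherwise parse the field
    return 0.0 if field == "?" else float(field)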

Exemplo n.º 36
0
import datetime
from pytz import timezone
print "Last run @%s" % (datetime.datetime.now(timezone('US/Pacific')))


# In[2]:

from pyspark.context import SparkContext
print "Running Spark Version %s" % (sc.version)


# In[3]:

from pyspark.conf import SparkConf
conf = SparkConf()
print(conf.toDebugString())


# In[4]:

# Read Orders
orders = sqlContext.read.format('com.databricks.spark.csv').options(header='true').load('NW/NW-Orders.csv')


# In[5]:

order_details = sqlContext.read.format('com.databricks.spark.csv').options(header='true').load('NW/NW-Order-Details.csv')


# In[6]:
Exemplo n.º 37
0
                                    y_: batch_ys
                                })))

                        if sv.is_chief:
                            summary_writer.add_summary(summary, step)

            if sv.should_stop() or step >= args.steps:
                tf_feed.terminate()

        # Ask for all the services to stop.
        print("{0} stopping supervisor".format(datetime.now().isoformat()))
        sv.stop()


if __name__ == '__main__':
    sc = SparkContext(conf=SparkConf().setAppName("read hdfs save to hdfs "))
    hive_context = HiveContext(sc)
    executors = sc._conf.get("spark.executor.instances")
    num_executors = int(executors) if executors is not None else 1
    num_ps = 1

    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="input hive table")
    parser.add_argument(
        "-m",
        "--model",
        help="HDFS path to save/load model during train/inference",
        default="mnist_model")
    parser.add_argument("-b",
                        "--batch_size",
                        help="number of records per batch",
Exemplo n.º 38
0
            port += 1

    # return the first available port
    return port


# this is the deprecated equivalent of ADD_JARS
add_files = None
if os.environ.get("ADD_FILES") is not None:
    add_files = os.environ.get("ADD_FILES").split(",")

if os.environ.get("SPARK_EXECUTOR_URI"):
    SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])

# setup mesos-based connection
conf = SparkConf().setMaster(os.environ["SPARK_MASTER"])

# optionally set memory limits
if os.environ.get("SPARK_RAM_DRIVER"):
    conf.set("spark.driver.memory", os.environ["SPARK_RAM_DRIVER"])
if os.environ.get("SPARK_RAM_WORKER"):
    conf.set("spark.executor_memory", os.environ["SPARK_RAM_WORKER"])

# set the UI port
conf.set("spark.ui.port", ui_get_available_port())

# optionally set the Spark binary
if os.environ.get("SPARK_BINARY"):
    conf.set("spark.executor.uri", os.environ["SPARK_BINARY"])

# establish config-based context
Exemplo n.º 39
0
    def _do_init(self, master, appName, sparkHome, pyFiles, environment,
                 batchSize, serializer, conf, jsc):
        self.environment = environment or {}
        self._conf = conf or SparkConf(_jvm=self._jvm)
        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 0:
            self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.iteritems():
                self._conf.setExecutorEnv(key, value)
        for key, value in DEFAULT_CONFIGS.items():
            self._conf.setIfMissing(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception(
                "An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)
        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v

        # Create the Java SparkContext through Py4J
        self._jsc = jsc or self._initialize_context(self._conf._jconf)

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        self._accumulatorServer = accumulators._start_update_server()
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jsc.accumulator(
            self._jvm.java.util.ArrayList(),
            self._jvm.PythonAccumulatorParam(host, port))

        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = set()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.insert(1, root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                if filename.lower().endswith(
                        "zip") or filename.lower().endswith("egg"):
                    self._python_includes.append(filename)
                    sys.path.insert(
                        1, os.path.join(SparkFiles.getRootDirectory(),
                                        filename))

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(
            self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir).getAbsolutePath()

        # profiling stats collected for each PythonRDD
        self._profile_stats = []
Exemplo n.º 40
0
    # return the first available port
    return port


# this is the deprecated equivalent of ADD_JARS
add_files = None
if os.environ.get("ADD_FILES") is not None:
    add_files = os.environ.get("ADD_FILES").split(",")

if os.environ.get("SPARK_EXECUTOR_URI"):
    SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])


# setup mesos-based connection
conf = SparkConf().setMaster(os.environ["SPARK_MASTER"])


# set the UI port
conf.set("spark.ui.port", ui_get_available_port())


# optionally set the Spark binary
if os.environ.get("SPARK_BINARY"):
    conf.set("spark.executor.uri", os.environ["SPARK_BINARY"])


# establish config-based context
sc = SparkContext(appName="DockerIPythonShell", pyFiles=add_files, conf=conf)
atexit.register(lambda: sc.stop())
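
# The shell-setup snippets above call ui_get_available_port(), of which only
# the tail is visible in this excerpt. A minimal sketch of such a helper (an
# assumption: probe ports with a plain socket until one binds) could be:
import socket


def ui_get_available_port(start_port=4040, max_tries=100):
    port = start_port
    for _ in range(max_tries):
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            probe.bind(("", port))
            return port
        except OSError:
            port += 1
        finally:
            probe.close()
    # return the first available port
    return port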
Exemplo n.º 41
0
    import tensorflow_datasets as tfds

    parser = argparse.ArgumentParser()
    parser.add_argument("--num_partitions",
                        help="Number of output partitions",
                        type=int,
                        default=10)
    parser.add_argument(
        "--output",
        help="HDFS directory to save examples in parallelized format",
        default="data/mnist")

    args = parser.parse_args()
    print("args:", args)

    sc = SparkContext(conf=SparkConf().setAppName("mnist_data_setup"))

    mnist, info = tfds.load('mnist', with_info=True)
    print(info.as_json)

    # convert to numpy, then RDDs
    mnist_train = tfds.as_numpy(mnist['train'])
    mnist_test = tfds.as_numpy(mnist['test'])

    train_rdd = sc.parallelize(mnist_train, args.num_partitions).cache()
    test_rdd = sc.parallelize(mnist_test, args.num_partitions).cache()

    # save as CSV (label,comma-separated-features)
    def to_csv(example):
        return str(example['label']) + ',' + ','.join(
            [str(i) for i in example['image'].reshape(784)])
Exemplo n.º 42
0
'''
Created on Oct 30, 2015

@author: dyerke
'''
from pyspark.context import SparkContext
from pyspark.conf import SparkConf

if __name__ == '__main__':
    m_hostname= "dyerke-Inspiron-7537"
    #
    conf= SparkConf()
    conf.setAppName("MyTestApp")
    conf.setMaster("spark://" + m_hostname + ":7077")
    conf.setSparkHome("/usr/local/spark")
    conf.set("spark.driver.host", m_hostname)
    logFile = "/usr/local/spark/README.md"  # Should be some file on your system
    #
    sc= SparkContext(conf=conf)
    logData= sc.textFile(logFile).cache()
    #
    countAs= logData.filter(lambda x: 'a' in x).count()
    countBs= logData.filter(lambda x: 'b' in x).count()
    #
    print("Lines with a: %i, lines with b: %i" % (countAs, countBs))
    sc.stop()
Exemplo n.º 43
0
                        default=10)
    parser.add_argument(
        "-o",
        "--output",
        help="HDFS directory to save examples in parallelized format",
        default="mnist_data")
    parser.add_argument("-r",
                        "--read",
                        help="read previously saved examples",
                        action="store_true")
    parser.add_argument("-v",
                        "--verify",
                        help="verify saved examples after writing",
                        action="store_true")

    args = parser.parse_args()
    print("args:", args)

    sc = SparkContext(conf=SparkConf().setAppName("mnist_parallelize"))

    if not args.read:
        # Note: these files are inside the mnist.zip file
        writeMNIST(sc, "mnist/train-images-idx3-ubyte.gz",
                   "mnist/train-labels-idx1-ubyte.gz", args.output + "/train",
                   args.format, args.num_partitions)
        writeMNIST(sc, "mnist/t10k-images-idx3-ubyte.gz",
                   "mnist/t10k-labels-idx1-ubyte.gz", args.output + "/test",
                   args.format, args.num_partitions)

    if args.read or args.verify:
        readMNIST(sc, args.output + "/train", args.format)
Exemplo n.º 44
0
File: suma.py Project: radianv/dpa
from pyspark import SparkConf, SparkContext
from pyspark.sql import functions as F

# imports needed by the code below but missing from this excerpt
from operator import add
from random import random

import logging
#logging.config.fileConfig('dpa_logging.conf')
logger = logging.getLogger('dpa.pipeline.test')


if __name__ == "__main__":

    def f(x):
        x = random() * x
        return x

    sc = SparkContext(appName="PiPySpark")

    conf = SparkConf()

    print(conf.getAll())
    print(sc.version)
    print(sc)
    #print(sys.argv[1])
    #print(sys.argv[2])

    #sqlCtx = HiveContext(sc)

    print("Iniciando la tarea en spark")
    result = sc.parallelize(range(10000))\
               .map(f)\
               .reduce(add)

    print("{result} es nuestra cálculo".format(result=result))
Exemplo n.º 45
0
class SparkContext(object):
    """
    Main entry point for Spark functionality. A SparkContext represents the
    connection to a Spark cluster, and can be used to create :class:`RDD` and
    broadcast variables on that cluster.

    .. note:: Only one :class:`SparkContext` should be active per JVM. You must `stop()`
        the active :class:`SparkContext` before creating a new one.

    .. note:: :class:`SparkContext` instance is not supported to share across multiple
        processes out of the box, and PySpark does not guarantee multi-processing execution.
        Use threads instead for concurrent processing purpose.
    """

    _gateway = None
    _jvm = None
    _next_accum_id = 0
    _active_spark_context = None
    _lock = RLock()
    _python_includes = None  # zip and egg files that need to be added to PYTHONPATH

    PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')

    def __init__(self,
                 master=None,
                 appName=None,
                 sparkHome=None,
                 pyFiles=None,
                 environment=None,
                 batchSize=0,
                 serializer=PickleSerializer(),
                 conf=None,
                 gateway=None,
                 jsc=None,
                 profiler_cls=BasicProfiler):
        """
        Create a new SparkContext. At least the master and app name should be set,
        either through the named parameters here or through `conf`.

        :param master: Cluster URL to connect to
               (e.g. mesos://host:port, spark://host:port, local[4]).
        :param appName: A name for your job, to display on the cluster web UI.
        :param sparkHome: Location where Spark is installed on cluster nodes.
        :param pyFiles: Collection of .zip or .py files to send to the cluster
               and add to PYTHONPATH.  These can be paths on the local file
               system or HDFS, HTTP, HTTPS, or FTP URLs.
        :param environment: A dictionary of environment variables to set on
               worker nodes.
        :param batchSize: The number of Python objects represented as a single
               Java object. Set 1 to disable batching, 0 to automatically choose
               the batch size based on object sizes, or -1 to use an unlimited
               batch size
        :param serializer: The serializer for RDDs.
        :param conf: A :class:`SparkConf` object setting Spark properties.
        :param gateway: Use an existing gateway and JVM, otherwise a new JVM
               will be instantiated.
        :param jsc: The JavaSparkContext instance (optional).
        :param profiler_cls: A class of custom Profiler used to do profiling
               (default is pyspark.profiler.BasicProfiler).


        >>> from pyspark.context import SparkContext
        >>> sc = SparkContext('local', 'test')

        >>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        ValueError:...
        """
        self._callsite = first_spark_call() or CallSite(None, None, None)
        if gateway is not None and gateway.gateway_parameters.auth_token is None:
            raise ValueError(
                "You are trying to pass an insecure Py4j gateway to Spark. This"
                " is not allowed as it is a security risk.")

        SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
        try:
            self._do_init(master, appName, sparkHome, pyFiles, environment,
                          batchSize, serializer, conf, jsc, profiler_cls)
        except:
            # If an error occurs, clean up in order to allow future SparkContext creation:
            self.stop()
            raise

    def _do_init(self, master, appName, sparkHome, pyFiles, environment,
                 batchSize, serializer, conf, jsc, profiler_cls):
        self.environment = environment or {}
        # java gateway must have been launched at this point.
        if conf is not None and conf._jconf is not None:
            # conf has been initialized in JVM properly, so use conf directly. This represents the
            # scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
            # created and then stopped, and we create a new SparkConf and new SparkContext again)
            self._conf = conf
        else:
            self._conf = SparkConf(_jvm=SparkContext._jvm)
            if conf is not None:
                for k, v in conf.getAll():
                    self._conf.set(k, v)

        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 0:
            self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.items():
                self._conf.setExecutorEnv(key, value)
        for key, value in DEFAULT_CONFIGS.items():
            self._conf.setIfMissing(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception(
                "An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)

        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v

        self.environment["PYTHONHASHSEED"] = os.environ.get(
            "PYTHONHASHSEED", "0")

        # Create the Java SparkContext through Py4J
        self._jsc = jsc or self._initialize_context(self._conf._jconf)
        # Reset the SparkConf to the one actually used by the SparkContext in JVM.
        self._conf = SparkConf(_jconf=self._jsc.sc().conf())

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        auth_token = self._gateway.gateway_parameters.auth_token
        self._accumulatorServer = accumulators._start_update_server(auth_token)
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jvm.PythonAccumulatorV2(
            host, port, auth_token)
        self._jsc.sc().register(self._javaAccumulator)

        # If encryption is enabled, we need to setup a server in the jvm to read broadcast
        # data via a socket.
        # scala's mangled names w/ $ in them require special treatment.
        self._encryption_enabled = self._jvm.PythonUtils.isEncryptionEnabled(
            self._jsc)

        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
        self.pythonVer = "%d.%d" % sys.version_info[:2]

        if sys.version_info < (3, 0):
            with warnings.catch_warnings():
                warnings.simplefilter("once")
                warnings.warn(
                    "Support for Python 2 is deprecated as of Spark 3.0. "
                    "See the plan for dropping Python 2 support at "
                    "https://spark.apache.org/news/plan-for-dropping-python-2-support.html.",
                    DeprecationWarning)

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = BroadcastPickleRegistry()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.insert(1, root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                try:
                    filepath = os.path.join(SparkFiles.getRootDirectory(),
                                            filename)
                    if not os.path.exists(filepath):
                        # In case of YARN with shell mode, 'spark.submit.pyFiles' files are
                        # not added via SparkContext.addFile. Here we check if the file exists,
                        # try to copy and then add it to the path. See SPARK-21945.
                        shutil.copyfile(path, filepath)
                    if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
                        self._python_includes.append(filename)
                        sys.path.insert(1, filepath)
                except Exception:
                    warnings.warn(
                        "Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
                        "Python path:\n  %s" % (path, "\n  ".join(sys.path)),
                        RuntimeWarning)

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(
            self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
                .getAbsolutePath()

        # profiling stats collected for each PythonRDD
        if self._conf.get("spark.python.profile", "false") == "true":
            dump_path = self._conf.get("spark.python.profile.dump", None)
            self.profiler_collector = ProfilerCollector(
                profiler_cls, dump_path)
        else:
            self.profiler_collector = None

        # create a signal handler which would be invoked on receiving SIGINT
        def signal_handler(signal, frame):
            self.cancelAllJobs()
            raise KeyboardInterrupt()

        # see http://stackoverflow.com/questions/23206787/
        if isinstance(threading.current_thread(), threading._MainThread):
            signal.signal(signal.SIGINT, signal_handler)

    def __repr__(self):
        return "<SparkContext master={master} appName={appName}>".format(
            master=self.master,
            appName=self.appName,
        )

    def _repr_html_(self):
        return """
        <div>
            <p><b>SparkContext</b></p>

            <p><a href="{sc.uiWebUrl}">Spark UI</a></p>

            <dl>
              <dt>Version</dt>
                <dd><code>v{sc.version}</code></dd>
              <dt>Master</dt>
                <dd><code>{sc.master}</code></dd>
              <dt>AppName</dt>
                <dd><code>{sc.appName}</code></dd>
            </dl>
        </div>
        """.format(sc=self)

    def _initialize_context(self, jconf):
        """
        Initialize SparkContext in function to allow subclass specific initialization
        """
        return self._jvm.JavaSparkContext(jconf)

    @classmethod
    def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
        """
        Checks whether a SparkContext is initialized or not.
        Throws error if a SparkContext is already running.
        """
        with SparkContext._lock:
            if not SparkContext._gateway:
                SparkContext._gateway = gateway or launch_gateway(conf)
                SparkContext._jvm = SparkContext._gateway.jvm

            if instance:
                if (SparkContext._active_spark_context
                        and SparkContext._active_spark_context != instance):
                    currentMaster = SparkContext._active_spark_context.master
                    currentAppName = SparkContext._active_spark_context.appName
                    callsite = SparkContext._active_spark_context._callsite

                    # Raise error if there is already a running Spark context
                    raise ValueError(
                        "Cannot run multiple SparkContexts at once; "
                        "existing SparkContext(app=%s, master=%s)"
                        " created by %s at %s:%s " %
                        (currentAppName, currentMaster, callsite.function,
                         callsite.file, callsite.linenum))
                else:
                    SparkContext._active_spark_context = instance

    def __getnewargs__(self):
        # This method is called when attempting to pickle SparkContext, which is always an error:
        raise Exception(
            "It appears that you are attempting to reference SparkContext from a broadcast "
            "variable, action, or transformation. SparkContext can only be used on the driver, "
            "not in code that it run on workers. For more information, see SPARK-5063."
        )

    def __enter__(self):
        """
        Enable 'with SparkContext(...) as sc: app(sc)' syntax.
        """
        return self

    def __exit__(self, type, value, trace):
        """
        Enable 'with SparkContext(...) as sc: app' syntax.

        Specifically stop the context on exit of the with block.
        """
        self.stop()

    @classmethod
    def getOrCreate(cls, conf=None):
        """
        Get or instantiate a SparkContext and register it as a singleton object.

        :param conf: SparkConf (optional)
        """
        with SparkContext._lock:
            if SparkContext._active_spark_context is None:
                SparkContext(conf=conf or SparkConf())
            return SparkContext._active_spark_context

    def setLogLevel(self, logLevel):
        """
        Control our logLevel. This overrides any user-defined log settings.
        Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
        """
        self._jsc.setLogLevel(logLevel)

    @classmethod
    def setSystemProperty(cls, key, value):
        """
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
        """
        SparkContext._ensure_initialized()
        SparkContext._jvm.java.lang.System.setProperty(key, value)

    @property
    def version(self):
        """
        The version of Spark on which this application is running.
        """
        return self._jsc.version()

    @property
    @ignore_unicode_prefix
    def applicationId(self):
        """
        A unique identifier for the Spark application.
        Its format depends on the scheduler implementation.

        * in case of local spark app something like 'local-1433865536131'
        * in case of YARN something like 'application_1433865536131_34483'

        >>> sc.applicationId  # doctest: +ELLIPSIS
        u'local-...'
        """
        return self._jsc.sc().applicationId()

    @property
    def uiWebUrl(self):
        """Return the URL of the SparkUI instance started by this SparkContext"""
        return self._jsc.sc().uiWebUrl().get()

    @property
    def startTime(self):
        """Return the epoch time when the Spark Context was started."""
        return self._jsc.startTime()

    @property
    def defaultParallelism(self):
        """
        Default level of parallelism to use when not given by user (e.g. for
        reduce tasks)
        """
        return self._jsc.sc().defaultParallelism()

    @property
    def defaultMinPartitions(self):
        """
        Default min number of partitions for Hadoop RDDs when not given by user
        """
        return self._jsc.sc().defaultMinPartitions()

    def stop(self):
        """
        Shut down the SparkContext.
        """
        if getattr(self, "_jsc", None):
            try:
                self._jsc.stop()
            except Py4JError:
                # Case: SPARK-18523
                warnings.warn(
                    'Unable to cleanly shutdown Spark JVM process.'
                    ' It is possible that the process has crashed,'
                    ' been killed or may also be in a zombie state.',
                    RuntimeWarning)
            finally:
                self._jsc = None
        if getattr(self, "_accumulatorServer", None):
            self._accumulatorServer.shutdown()
            self._accumulatorServer = None
        with SparkContext._lock:
            SparkContext._active_spark_context = None

    def emptyRDD(self):
        """
        Create an RDD that has no partitions or elements.
        """
        return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())

    def range(self, start, end=None, step=1, numSlices=None):
        """
        Create a new RDD of int containing elements from `start` to `end`
        (exclusive), increased by `step` every element. Can be called the same
        way as python's built-in range() function. If called with a single argument,
        the argument is interpreted as `end`, and `start` is set to 0.

        :param start: the start value
        :param end: the end value (exclusive)
        :param step: the incremental step (default: 1)
        :param numSlices: the number of partitions of the new RDD
        :return: An RDD of int

        >>> sc.range(5).collect()
        [0, 1, 2, 3, 4]
        >>> sc.range(2, 4).collect()
        [2, 3]
        >>> sc.range(1, 7, 2).collect()
        [1, 3, 5]
        """
        if end is None:
            end = start
            start = 0

        return self.parallelize(xrange(start, end, step), numSlices)

    def parallelize(self, c, numSlices=None):
        """
        Distribute a local Python collection to form an RDD. Using xrange
        is recommended if the input represents a range for performance.

        >>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
        [[0], [2], [3], [4], [6]]
        >>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
        [[], [0], [], [2], [4]]
        """
        numSlices = int(
            numSlices) if numSlices is not None else self.defaultParallelism
        if isinstance(c, xrange):
            size = len(c)
            if size == 0:
                return self.parallelize([], numSlices)
            step = c[1] - c[0] if size > 1 else 1
            start0 = c[0]

            def getStart(split):
                return start0 + int((split * size / numSlices)) * step

            def f(split, iterator):
                # it's an empty iterator here but we need this line for triggering the
                # logic of signal handling in FramedSerializer.load_stream, for instance,
                # SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since
                # FramedSerializer.load_stream produces a generator, the control should
                # at least be in that function once. Here we do it by explicitly converting
                # the empty iterator to a list, thus make sure worker reuse takes effect.
                # See more details in SPARK-26549.
                assert len(list(iterator)) == 0
                return xrange(getStart(split), getStart(split + 1), step)

            return self.parallelize([], numSlices).mapPartitionsWithIndex(f)

        # Make sure we distribute data evenly if it's smaller than self.batchSize
        if "__len__" not in dir(c):
            c = list(c)  # Make it a list so we can compute its length
        batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
        serializer = BatchedSerializer(self._unbatched_serializer, batchSize)

        def reader_func(temp_filename):
            return self._jvm.PythonRDD.readRDDFromFile(self._jsc,
                                                       temp_filename,
                                                       numSlices)

        def createRDDServer():
            return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)

        jrdd = self._serialize_to_jvm(c, serializer, reader_func,
                                      createRDDServer)
        return RDD(jrdd, self, serializer)

    def _serialize_to_jvm(self, data, serializer, reader_func,
                          createRDDServer):
        """
        Using py4j to send a large dataset to the jvm is really slow, so we use either a file
        or a socket if we have encryption enabled.
        :param data:
        :param serializer:
        :param reader_func:  A function which takes a filename and reads in the data in the jvm and
                returns a JavaRDD. Only used when encryption is disabled.
        :param createRDDServer:  A function which creates a PythonRDDServer in the jvm to
               accept the serialized data, for use when encryption is enabled.
        :return:
        """
        if self._encryption_enabled:
            # with encryption, we open a server in java and send the data directly
            server = createRDDServer()
            (sock_file, _) = local_connect_and_auth(server.port(),
                                                    server.secret())
            chunked_out = ChunkedStream(sock_file, 8192)
            serializer.dump_stream(data, chunked_out)
            chunked_out.close()
            # this call will block until the server has read all the data and processed it (or
            # throws an exception)
            r = server.getResult()
            return r
        else:
            # without encryption, we serialize to a file, and we read the file in java and
            # parallelize from there.
            tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
            try:
                try:
                    serializer.dump_stream(data, tempFile)
                finally:
                    tempFile.close()
                return reader_func(tempFile.name)
            finally:
                # we eagerly read the file so we can delete it right after.
                os.unlink(tempFile.name)

    def pickleFile(self, name, minPartitions=None):
        """
        Load an RDD previously saved using :meth:`RDD.saveAsPickleFile` method.

        >>> tmpFile = NamedTemporaryFile(delete=True)
        >>> tmpFile.close()
        >>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
        >>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(self._jsc.objectFile(name, minPartitions), self)

    @ignore_unicode_prefix
    def textFile(self, name, minPartitions=None, use_unicode=True):
        """
        Read a text file from HDFS, a local file system (available on all
        nodes), or any Hadoop-supported file system URI, and return it as an
        RDD of Strings.
        The text files must be encoded as UTF-8.

        If use_unicode is False, the strings will be kept as `str` (encoding
        as `utf-8`), which is faster and smaller than unicode. (Added in
        Spark 1.2)

        >>> path = os.path.join(tempdir, "sample-text.txt")
        >>> with open(path, "w") as testFile:
        ...    _ = testFile.write("Hello world!")
        >>> textFile = sc.textFile(path)
        >>> textFile.collect()
        [u'Hello world!']
        """
        minPartitions = minPartitions or min(self.defaultParallelism, 2)
        return RDD(self._jsc.textFile(name, minPartitions), self,
                   UTF8Deserializer(use_unicode))

    @ignore_unicode_prefix
    def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
        """
        Read a directory of text files from HDFS, a local file system
        (available on all nodes), or any  Hadoop-supported file system
        URI. Each file is read as a single record and returned in a
        key-value pair, where the key is the path of each file, the
        value is the content of each file.
        The text files must be encoded as UTF-8.

        If use_unicode is False, the strings will be kept as `str` (encoding
        as `utf-8`), which is faster and smaller than unicode. (Added in
        Spark 1.2)

        For example, if you have the following files:

        .. code-block:: text

            hdfs://a-hdfs-path/part-00000
            hdfs://a-hdfs-path/part-00001
            ...
            hdfs://a-hdfs-path/part-nnnnn

        Do ``rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")``,
        then ``rdd`` contains:

        .. code-block:: text

            (a-hdfs-path/part-00000, its content)
            (a-hdfs-path/part-00001, its content)
            ...
            (a-hdfs-path/part-nnnnn, its content)

        .. note:: Small files are preferred, as each file will be loaded
            fully in memory.

        >>> dirPath = os.path.join(tempdir, "files")
        >>> os.mkdir(dirPath)
        >>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
        ...    _ = file1.write("1")
        >>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
        ...    _ = file2.write("2")
        >>> textFiles = sc.wholeTextFiles(dirPath)
        >>> sorted(textFiles.collect())
        [(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(
            self._jsc.wholeTextFiles(path, minPartitions), self,
            PairDeserializer(UTF8Deserializer(use_unicode),
                             UTF8Deserializer(use_unicode)))

    def binaryFiles(self, path, minPartitions=None):
        """
        .. note:: Experimental

        Read a directory of binary files from HDFS, a local file system
        (available on all nodes), or any Hadoop-supported file system URI
        as a byte array. Each file is read as a single record and returned
        in a key-value pair, where the key is the path of each file, the
        value is the content of each file.

        .. note:: Small files are preferred, large file is also allowable, but
            may cause bad performance.
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(self._jsc.binaryFiles(path, minPartitions), self,
                   PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
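
    # A hedged usage sketch (not part of the original source): pair each
    # binary file's path with its size in bytes; the path is hypothetical.
    #
    #   sizes = sc.binaryFiles("hdfs:///data/images").mapValues(len).collect()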

    def binaryRecords(self, path, recordLength):
        """
        .. note:: Experimental

        Load data from a flat binary file, assuming each record is a set of numbers
        with the specified numerical format (see ByteBuffer), and the number of
        bytes per record is constant.

        :param path: Directory to the input data files
        :param recordLength: The length at which to split the records
        """
        return RDD(self._jsc.binaryRecords(path, recordLength), self,
                   NoOpSerializer())
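
    # A hedged usage sketch (not part of the original source): decode
    # fixed-length 8-byte records as little-endian doubles; the path is
    # hypothetical.
    #
    #   import struct
    #   doubles = sc.binaryRecords("hdfs:///data/doubles.bin", 8) \
    #               .map(lambda rec: struct.unpack("<d", rec)[0])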

    def _dictToJavaMap(self, d):
        jm = self._jvm.java.util.HashMap()
        if not d:
            d = {}
        for k, v in d.items():
            jm[k] = v
        return jm

    def sequenceFile(self,
                     path,
                     keyClass=None,
                     valueClass=None,
                     keyConverter=None,
                     valueConverter=None,
                     minSplits=None,
                     batchSize=0):
        """
        Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is as follows:

            1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
               and value Writable classes
            2. Serialization is attempted via Pyrolite pickling
            3. If this fails, the fallback is to call 'toString' on each key and value
            4. :class:`PickleSerializer` is used to deserialize pickled objects on the Python side

        :param path: path to sequence file
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter:
        :param valueConverter:
        :param minSplits: minimum splits in dataset
               (default min(2, sc.defaultParallelism))
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        minSplits = minSplits or min(self.defaultParallelism, 2)
        jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass,
                                                valueClass, keyConverter,
                                                valueConverter, minSplits,
                                                batchSize)
        return RDD(jrdd, self)
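
    # Usage sketch: reading an IntWritable/Text SequenceFile; the Writable
    # classes can usually be omitted and inferred from the file. The path is
    # illustrative.
    #
    #     pairs = sc.sequenceFile("hdfs://namenode/data/pairs.seq",
    #                             "org.apache.hadoop.io.IntWritable",
    #                             "org.apache.hadoop.io.Text")
    #     pairs.take(2)   # e.g. [(1, u'one'), (2, u'two')]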

    def newAPIHadoopFile(self,
                         path,
                         inputFormatClass,
                         keyClass,
                         valueClass,
                         keyConverter=None,
                         valueConverter=None,
                         conf=None,
                         batchSize=0):
        """
        Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is the same as for sc.sequenceFile.

        A Hadoop configuration can be passed in as a Python dict. This will be converted into a
        Configuration in Java.

        :param path: path to Hadoop file
        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path,
                                                    inputFormatClass, keyClass,
                                                    valueClass, keyConverter,
                                                    valueConverter, jconf,
                                                    batchSize)
        return RDD(jrdd, self)
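
    # Usage sketch: reading plain text through the new-API TextInputFormat and
    # passing an extra Hadoop option via the conf dict. The path and the option
    # value are illustrative.
    #
    #     lines = sc.newAPIHadoopFile(
    #         "hdfs://namenode/logs",
    #         "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
    #         "org.apache.hadoop.io.LongWritable",
    #         "org.apache.hadoop.io.Text",
    #         conf={"mapreduce.input.fileinputformat.split.maxsize": "134217728"})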

    def newAPIHadoopRDD(self,
                        inputFormatClass,
                        keyClass,
                        valueClass,
                        keyConverter=None,
                        valueConverter=None,
                        conf=None,
                        batchSize=0):
        """
        Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
        Hadoop configuration, which is passed in as a Python dict.
        This will be converted into a Configuration in Java.
        The mechanism is the same as for sc.sequenceFile.

        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass,
                                                   keyClass, valueClass,
                                                   keyConverter,
                                                   valueConverter, jconf,
                                                   batchSize)
        return RDD(jrdd, self)

    def hadoopFile(self,
                   path,
                   inputFormatClass,
                   keyClass,
                   valueClass,
                   keyConverter=None,
                   valueConverter=None,
                   conf=None,
                   batchSize=0):
        """
        Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is the same as for sc.sequenceFile.

        A Hadoop configuration can be passed in as a Python dict. This will be converted into a
        Configuration in Java.

        :param path: path to Hadoop file
        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapred.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path,
                                              inputFormatClass, keyClass,
                                              valueClass, keyConverter,
                                              valueConverter, jconf, batchSize)
        return RDD(jrdd, self)

    def hadoopRDD(self,
                  inputFormatClass,
                  keyClass,
                  valueClass,
                  keyConverter=None,
                  valueConverter=None,
                  conf=None,
                  batchSize=0):
        """
        Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
        Hadoop configuration, which is passed in as a Python dict.
        This will be converted into a Configuration in Java.
        The mechanism is the same as for sc.sequenceFile.

        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapred.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass,
                                             keyClass, valueClass,
                                             keyConverter, valueConverter,
                                             jconf, batchSize)
        return RDD(jrdd, self)
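
    # Usage sketch: hadoopRDD takes no path argument, so the input directory is
    # supplied through the Hadoop configuration dict (the old-API property name
    # is assumed here; the path is illustrative).
    #
    #     lines = sc.hadoopRDD(
    #         "org.apache.hadoop.mapred.TextInputFormat",
    #         "org.apache.hadoop.io.LongWritable",
    #         "org.apache.hadoop.io.Text",
    #         conf={"mapred.input.dir": "hdfs://namenode/logs"})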

    def _checkpointFile(self, name, input_deserializer):
        jrdd = self._jsc.checkpointFile(name)
        return RDD(jrdd, self, input_deserializer)

    @ignore_unicode_prefix
    def union(self, rdds):
        """
        Build the union of a list of RDDs.

        This supports unions() of RDDs with different serialized formats,
        although this forces them to be reserialized using the default
        serializer:

        >>> path = os.path.join(tempdir, "union-text.txt")
        >>> with open(path, "w") as testFile:
        ...    _ = testFile.write("Hello")
        >>> textFile = sc.textFile(path)
        >>> textFile.collect()
        [u'Hello']
        >>> parallelized = sc.parallelize(["World!"])
        >>> sorted(sc.union([textFile, parallelized]).collect())
        [u'Hello', 'World!']
        """
        first_jrdd_deserializer = rdds[0]._jrdd_deserializer
        if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
            rdds = [x._reserialize() for x in rdds]
        cls = SparkContext._jvm.org.apache.spark.api.java.JavaRDD
        jrdds = SparkContext._gateway.new_array(cls, len(rdds))
        for i in range(0, len(rdds)):
            jrdds[i] = rdds[i]._jrdd
        return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer)

    def broadcast(self, value):
        """
        Broadcast a read-only variable to the cluster, returning a :class:`Broadcast`
        object for reading it in distributed functions. The variable will
        be sent to each cluster only once.
        """
        return Broadcast(self, value, self._pickled_broadcast_vars)
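
    # Usage sketch: ship a small lookup table to every executor once and read
    # it through .value inside tasks.
    #
    #     lookup = sc.broadcast({"a": 1, "b": 2})
    #     sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).collect()
    #     # [1, 2, 1]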

    def accumulator(self, value, accum_param=None):
        """
        Create an :class:`Accumulator` with the given initial value, using a given
        :class:`AccumulatorParam` helper object to define how to add values of the
        data type if provided. Default AccumulatorParams are used for integers
        and floating-point numbers if you do not provide one. For other types,
        a custom AccumulatorParam can be used.
        """
        if accum_param is None:
            if isinstance(value, int):
                accum_param = accumulators.INT_ACCUMULATOR_PARAM
            elif isinstance(value, float):
                accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
            elif isinstance(value, complex):
                accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
            else:
                raise TypeError("No default accumulator param for type %s" %
                                type(value))
        SparkContext._next_accum_id += 1
        return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
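
    # Usage sketch: count bad records as a side effect of a transformation.
    # Tasks may only add to the accumulator; the driver reads .value, which is
    # populated once an action has run.
    #
    #     bad = sc.accumulator(0)
    #     def check(x):
    #         if x < 0:
    #             bad.add(1)
    #         return x
    #     sc.parallelize([1, -2, 3, -4]).map(check).count()
    #     bad.value   # 2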

    def addFile(self, path, recursive=False):
        """
        Add a file to be downloaded with this Spark job on every node.
        The `path` passed can be either a local file, a file in HDFS
        (or other Hadoop-supported filesystems), or an HTTP, HTTPS or
        FTP URI.

        To access the file in Spark jobs, use :meth:`SparkFiles.get` with the
        filename to find its download location.

        A directory can be given if the recursive option is set to True.
        Currently directories are only supported for Hadoop-supported filesystems.

        .. note:: A path can be added only once. Subsequent additions of the same path are ignored.

        >>> from pyspark import SparkFiles
        >>> path = os.path.join(tempdir, "test.txt")
        >>> with open(path, "w") as testFile:
        ...    _ = testFile.write("100")
        >>> sc.addFile(path)
        >>> def func(iterator):
        ...    with open(SparkFiles.get("test.txt")) as testFile:
        ...        fileVal = int(testFile.readline())
        ...        return [x * fileVal for x in iterator]
        >>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
        [100, 200, 300, 400]
        """
        self._jsc.sc().addFile(path, recursive)

    def addPyFile(self, path):
        """
        Add a .py or .zip dependency for all tasks to be executed on this
        SparkContext in the future.  The `path` passed can be either a local
        file, a file in HDFS (or other Hadoop-supported filesystems), or an
        HTTP, HTTPS or FTP URI.

        .. note:: A path can be added only once. Subsequent additions of the same path are ignored.
        """
        self.addFile(path)
        (dirname, filename) = os.path.split(
            path)  # dirname may be directory or HDFS/S3 prefix
        if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
            self._python_includes.append(filename)
            # for tests in local mode
            sys.path.insert(
                1, os.path.join(SparkFiles.getRootDirectory(), filename))
        if sys.version > '3':
            import importlib
            importlib.invalidate_caches()

    def setCheckpointDir(self, dirName):
        """
        Set the directory under which RDDs are going to be checkpointed. The
        directory must be an HDFS path if running on a cluster.
        """
        self._jsc.sc().setCheckpointDir(dirName)
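
    # Usage sketch: set the checkpoint directory once per context, then
    # checkpoint individual RDDs; the directory below is illustrative.
    #
    #     sc.setCheckpointDir("hdfs://namenode/checkpoints")
    #     rdd = sc.parallelize(range(100)).map(lambda x: x * 2)
    #     rdd.checkpoint()
    #     rdd.count()   # the first action materializes the checkpoint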

    def _getJavaStorageLevel(self, storageLevel):
        """
        Returns a Java StorageLevel based on a pyspark.StorageLevel.
        """
        if not isinstance(storageLevel, StorageLevel):
            raise Exception(
                "storageLevel must be of type pyspark.StorageLevel")

        newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
        return newStorageLevel(storageLevel.useDisk, storageLevel.useMemory,
                               storageLevel.useOffHeap,
                               storageLevel.deserialized,
                               storageLevel.replication)

    def setJobGroup(self, groupId, description, interruptOnCancel=False):
        """
        Assigns a group ID to all the jobs started by this thread until the group ID is set to a
        different value or cleared.

        Often, a unit of execution in an application consists of multiple Spark actions or jobs.
        Application programmers can use this method to group all those jobs together and give a
        group description. Once set, the Spark web UI will associate such jobs with this group.

        The application can use :meth:`SparkContext.cancelJobGroup` to cancel all
        running jobs in this group.

        >>> import threading
        >>> from time import sleep
        >>> result = "Not Set"
        >>> lock = threading.Lock()
        >>> def map_func(x):
        ...     sleep(100)
        ...     raise Exception("Task should have been cancelled")
        >>> def start_job(x):
        ...     global result
        ...     try:
        ...         sc.setJobGroup("job_to_cancel", "some description")
        ...         result = sc.parallelize(range(x)).map(map_func).collect()
        ...     except Exception as e:
        ...         result = "Cancelled"
        ...     lock.release()
        >>> def stop_job():
        ...     sleep(5)
        ...     sc.cancelJobGroup("job_to_cancel")
        >>> suppress = lock.acquire()
        >>> suppress = threading.Thread(target=start_job, args=(10,)).start()
        >>> suppress = threading.Thread(target=stop_job).start()
        >>> suppress = lock.acquire()
        >>> print(result)
        Cancelled

        If interruptOnCancel is set to true for the job group, then job cancellation will result
        in Thread.interrupt() being called on the job's executor threads. This is useful to help
        ensure that the tasks are actually stopped in a timely manner, but is off by default due
        to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
        """
        self._jsc.setJobGroup(groupId, description, interruptOnCancel)

    def setLocalProperty(self, key, value):
        """
        Set a local property that affects jobs submitted from this thread, such as the
        Spark fair scheduler pool.
        """
        self._jsc.setLocalProperty(key, value)
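
    # Usage sketch: route jobs submitted from this thread into a fair-scheduler
    # pool; the pool name is illustrative and must exist in the scheduler
    # configuration. Passing None clears the property again.
    #
    #     sc.setLocalProperty("spark.scheduler.pool", "reporting")
    #     # ... submit jobs that should share the "reporting" pool ...
    #     sc.setLocalProperty("spark.scheduler.pool", None)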

    def getLocalProperty(self, key):
        """
        Get a local property set in this thread, or null if it is missing. See
        :meth:`setLocalProperty`.
        """
        return self._jsc.getLocalProperty(key)

    def setJobDescription(self, value):
        """
        Set a human-readable description of the current job.
        """
        self._jsc.setJobDescription(value)

    def sparkUser(self):
        """
        Get SPARK_USER for user who is running SparkContext.
        """
        return self._jsc.sc().sparkUser()

    def cancelJobGroup(self, groupId):
        """
        Cancel active jobs for the specified group. See :meth:`SparkContext.setJobGroup`
        for more information.
        """
        self._jsc.sc().cancelJobGroup(groupId)

    def cancelAllJobs(self):
        """
        Cancel all jobs that have been scheduled or are running.
        """
        self._jsc.sc().cancelAllJobs()

    def statusTracker(self):
        """
        Return :class:`StatusTracker` object
        """
        return StatusTracker(self._jsc.statusTracker())

    def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
        """
        Executes the given partitionFunc on the specified set of partitions,
        returning the result as an array of elements.

        If 'partitions' is not specified, this will run over all partitions.

        >>> myRDD = sc.parallelize(range(6), 3)
        >>> sc.runJob(myRDD, lambda part: [x * x for x in part])
        [0, 1, 4, 9, 16, 25]

        >>> myRDD = sc.parallelize(range(6), 3)
        >>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
        [0, 1, 16, 25]
        """
        if partitions is None:
            partitions = range(rdd._jrdd.partitions().size())

        # Implementation note: This is implemented as a mapPartitions followed
        # by runJob() in order to avoid having to pass a Python lambda into
        # SparkContext#runJob.
        mappedRDD = rdd.mapPartitions(partitionFunc)
        sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd,
                                               partitions)
        return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))

    def show_profiles(self):
        """ Print the profile stats to stdout """
        if self.profiler_collector is not None:
            self.profiler_collector.show_profiles()
        else:
            raise RuntimeError(
                "'spark.python.profile' configuration must be set "
                "to 'true' to enable Python profile.")

    def dump_profiles(self, path):
        """ Dump the profile stats into directory `path`
        """
        if self.profiler_collector is not None:
            self.profiler_collector.dump_profiles(path)
        else:
            raise RuntimeError(
                "'spark.python.profile' configuration must be set "
                "to 'true' to enable Python profile.")

    def getConf(self):
        conf = SparkConf()
        conf.setAll(self._conf.getAll())
        return conf
Example No. 46
0
from pyspark.ml.feature import ChiSqSelector
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.classification import GBTClassifier
from pyspark.sql.functions import lit
from pyspark.ml.classification import LinearSVC
from pyspark.ml.classification import MultilayerPerceptronClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
import time

# start Spark Session
from pyspark.sql import SparkSession
from pyspark.conf import SparkConf

sc_conf = SparkConf()
sc_conf.setAppName("w261_final_rishi")
sc_conf.set('spark.executor.memory', '18g')
sc_conf.set('spark.driver.memory', '18g')
sc_conf.set('spark.executor.cores', '7')
sc_conf.set('spark.driver.cores', '7')

spark = SparkSession\
        .builder\
        .config(conf=sc_conf)\
        .getOrCreate()

sc = spark.sparkContext

df_train = spark.read.csv('gs://w261hw5rishi/projdata/dac/train.csv',
                          header=True,

    arguments = docopt(__doc__)

    # Get the options in a more usable fashion.
    algorithm_type = arguments["<algorithm>"]
    data_path = arguments["<dataset_path>"]
    output_path = arguments["<output_path>"]
    verbose = arguments["--verbose"]
    max_clusters = arguments["--c"]
    max_iter = arguments["--i"]
    app_name = arguments["--app-name"] if arguments[
        "--app-name"] else "twitter-clustering"

    # We use directly the SparkSession here instead of SparkConf and SparkContext,
    # since now the SparkSession is the entry point for all functionatilies of pyspark.
    # See stackoverflow.com/questions/43802809/difference-between-sparkcontext-javasparkcontext-sqlcontext-sparksession
    conf = SparkConf().setAppName(app_name)
    conf = (conf.set('spark.executor.memory', '20G').set(
        'spark.driver.memory',
        '20G').set('spark.driver.maxResultSize',
                   '10G').set('spark.executor.cores',
                              5).set('spark.executor.instances',
                                     4).set('spark.default.parallelism', 20))
    spark = SparkSession.builder.config(conf=conf).getOrCreate()

    # Get the dataset.
    if arguments["--aws"]:
        aws_token = arguments["--aws-token"] if arguments[
            "--aws-token"] else os.environ["ACCESS_TOKEN"]
        aws_secret = arguments["--aws-secret"] if arguments[
            "--aws-secret"] else os.environ["ACCESS_SECRET"]
        spark.sparkContext._jsc.hadoopConfiguration().set(
Example No. 48
0
 def getConf(self):
     conf = SparkConf()
     conf.setAll(self._conf.getAll())
     return conf
Example No. 49
0
# import libraries
import sys
from pyspark.conf import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql import HiveContext
from pyspark.sql import functions as F
from pyspark.sql import types as T
from pyspark.sql.window import Window

# getting arguments
years = sys.argv[1]
months = sys.argv[2]

# init sparkConf
conf = SparkConf()
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
    .set("spark.executor.cores", "1") \
    .set("spark.executor.memory", "1G") \
    .set("spark.sql.cbo.enabled", "true")

spark = SparkSession.builder.appName("ALIM_KYL_VTE_" + years + "_" + months)\
                    .config(conf=conf).enableHiveSupport().getOrCreate()

sc = spark.sparkContext
sqlContext = HiveContext(sc)
sqlContext.setConf("hive.exec.dynamic.partition", "true")
sqlContext.setConf("hive.exec.dynamic.partition.mode", "nonstrict")

months = str(int(months))
years = str(int(years))
Example No. 50
0
class SparkContext(object):

    """
    Main entry point for Spark functionality. A SparkContext represents the
    connection to a Spark cluster, and can be used to create L{RDD} and
    broadcast variables on that cluster.
    """

    _gateway = None
    _jvm = None
    _next_accum_id = 0
    _active_spark_context = None
    _lock = RLock()
    _python_includes = None  # zip and egg files that need to be added to PYTHONPATH

    PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')

    def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
                 environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
                 gateway=None, jsc=None, profiler_cls=BasicProfiler):
        """
        Create a new SparkContext. At least the master and app name should be set,
        either through the named parameters here or through C{conf}.

        :param master: Cluster URL to connect to
               (e.g. mesos://host:port, spark://host:port, local[4]).
        :param appName: A name for your job, to display on the cluster web UI.
        :param sparkHome: Location where Spark is installed on cluster nodes.
        :param pyFiles: Collection of .zip or .py files to send to the cluster
               and add to PYTHONPATH.  These can be paths on the local file
               system or HDFS, HTTP, HTTPS, or FTP URLs.
        :param environment: A dictionary of environment variables to set on
               worker nodes.
        :param batchSize: The number of Python objects represented as a single
               Java object. Set 1 to disable batching, 0 to automatically choose
               the batch size based on object sizes, or -1 to use an unlimited
               batch size
        :param serializer: The serializer for RDDs.
        :param conf: A L{SparkConf} object setting Spark properties.
        :param gateway: Use an existing gateway and JVM, otherwise a new JVM
               will be instantiated.
        :param jsc: The JavaSparkContext instance (optional).
        :param profiler_cls: A class of custom Profiler used to do profiling
               (default is pyspark.profiler.BasicProfiler).


        >>> from pyspark.context import SparkContext
        >>> sc = SparkContext('local', 'test')

        >>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        ValueError:...
        """
        self._callsite = first_spark_call() or CallSite(None, None, None)
        SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
        try:
            self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
                          conf, jsc, profiler_cls)
        except:
            # If an error occurs, clean up in order to allow future SparkContext creation:
            self.stop()
            raise

    def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
                 conf, jsc, profiler_cls):
        self.environment = environment or {}
        # java gateway must have been launched at this point.
        if conf is not None and conf._jconf is not None:
            # conf has been initialized in JVM properly, so use conf directly. This represents the
            # scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
            # created and then stopped, and we create a new SparkConf and new SparkContext again)
            self._conf = conf
        else:
            self._conf = SparkConf(_jvm=SparkContext._jvm)
            if conf is not None:
                for k, v in conf.getAll():
                    self._conf.set(k, v)

        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 0:
            self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.items():
                self._conf.setExecutorEnv(key, value)
        for key, value in DEFAULT_CONFIGS.items():
            self._conf.setIfMissing(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception("An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)

        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v

        self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")

        # Create the Java SparkContext through Py4J
        self._jsc = jsc or self._initialize_context(self._conf._jconf)
        # Reset the SparkConf to the one actually used by the SparkContext in JVM.
        self._conf = SparkConf(_jconf=self._jsc.sc().conf())

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        self._accumulatorServer = accumulators._start_update_server()
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port)
        self._jsc.sc().register(self._javaAccumulator)

        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
        self.pythonVer = "%d.%d" % sys.version_info[:2]

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = BroadcastPickleRegistry()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.insert(1, root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
                    self._python_includes.append(filename)
                    sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
                .getAbsolutePath()

        # profiling stats collected for each PythonRDD
        if self._conf.get("spark.python.profile", "false") == "true":
            dump_path = self._conf.get("spark.python.profile.dump", None)
            self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
        else:
            self.profiler_collector = None

        # create a signal handler which would be invoked on receiving SIGINT
        def signal_handler(signal, frame):
            self.cancelAllJobs()
            raise KeyboardInterrupt()

        # see http://stackoverflow.com/questions/23206787/
        if isinstance(threading.current_thread(), threading._MainThread):
            signal.signal(signal.SIGINT, signal_handler)

    def __repr__(self):
        return "<SparkContext master={master} appName={appName}>".format(
            master=self.master,
            appName=self.appName,
        )

    def _repr_html_(self):
        return """
        <div>
            <p><b>SparkContext</b></p>

            <p><a href="{sc.uiWebUrl}">Spark UI</a></p>

            <dl>
              <dt>Version</dt>
                <dd><code>v{sc.version}</code></dd>
              <dt>Master</dt>
                <dd><code>{sc.master}</code></dd>
              <dt>AppName</dt>
                <dd><code>{sc.appName}</code></dd>
            </dl>
        </div>
        """.format(
            sc=self
        )

    def _initialize_context(self, jconf):
        """
        Initialize SparkContext in function to allow subclass specific initialization
        """
        return self._jvm.JavaSparkContext(jconf)

    @classmethod
    def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
        """
        Checks whether a SparkContext is initialized or not.
        Throws error if a SparkContext is already running.
        """
        with SparkContext._lock:
            if not SparkContext._gateway:
                SparkContext._gateway = gateway or launch_gateway(conf)
                SparkContext._jvm = SparkContext._gateway.jvm

            if instance:
                if (SparkContext._active_spark_context and
                        SparkContext._active_spark_context != instance):
                    currentMaster = SparkContext._active_spark_context.master
                    currentAppName = SparkContext._active_spark_context.appName
                    callsite = SparkContext._active_spark_context._callsite

                    # Raise error if there is already a running Spark context
                    raise ValueError(
                        "Cannot run multiple SparkContexts at once; "
                        "existing SparkContext(app=%s, master=%s)"
                        " created by %s at %s:%s "
                        % (currentAppName, currentMaster,
                            callsite.function, callsite.file, callsite.linenum))
                else:
                    SparkContext._active_spark_context = instance

    def __getnewargs__(self):
        # This method is called when attempting to pickle SparkContext, which is always an error:
        raise Exception(
            "It appears that you are attempting to reference SparkContext from a broadcast "
            "variable, action, or transformation. SparkContext can only be used on the driver, "
            "not in code that it run on workers. For more information, see SPARK-5063."
        )

    def __enter__(self):
        """
        Enable 'with SparkContext(...) as sc: app(sc)' syntax.
        """
        return self

    def __exit__(self, type, value, trace):
        """
        Enable 'with SparkContext(...) as sc: app' syntax.

        Specifically stop the context on exit of the with block.
        """
        self.stop()

    @classmethod
    def getOrCreate(cls, conf=None):
        """
        Get or instantiate a SparkContext and register it as a singleton object.

        :param conf: SparkConf (optional)
        """
        with SparkContext._lock:
            if SparkContext._active_spark_context is None:
                SparkContext(conf=conf or SparkConf())
            return SparkContext._active_spark_context
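
    # Usage sketch: getOrCreate() is idempotent, so repeated calls hand back the
    # same active context instead of raising the "Cannot run multiple
    # SparkContexts at once" error; master and app name are illustrative.
    #
    #     sc1 = SparkContext.getOrCreate(
    #         SparkConf().setMaster("local[2]").setAppName("demo"))
    #     sc2 = SparkContext.getOrCreate()
    #     sc1 is sc2   # True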

    def setLogLevel(self, logLevel):
        """
        Control our logLevel. This overrides any user-defined log settings.
        Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
        """
        self._jsc.setLogLevel(logLevel)

    @classmethod
    def setSystemProperty(cls, key, value):
        """
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
        """
        SparkContext._ensure_initialized()
        SparkContext._jvm.java.lang.System.setProperty(key, value)
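
    # Usage sketch: system properties only take effect if they are set before
    # the first SparkContext is created; the value is illustrative.
    #
    #     SparkContext.setSystemProperty("spark.executor.memory", "2g")
    #     sc = SparkContext("local", "demo")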

    @property
    def version(self):
        """
        The version of Spark on which this application is running.
        """
        return self._jsc.version()

    @property
    @ignore_unicode_prefix
    def applicationId(self):
        """
        A unique identifier for the Spark application.
        Its format depends on the scheduler implementation.

        * in case of local spark app something like 'local-1433865536131'
        * in case of YARN something like 'application_1433865536131_34483'

        >>> sc.applicationId  # doctest: +ELLIPSIS
        u'local-...'
        """
        return self._jsc.sc().applicationId()

    @property
    def uiWebUrl(self):
        """Return the URL of the SparkUI instance started by this SparkContext"""
        return self._jsc.sc().uiWebUrl().get()

    @property
    def startTime(self):
        """Return the epoch time when the Spark Context was started."""
        return self._jsc.startTime()

    @property
    def defaultParallelism(self):
        """
        Default level of parallelism to use when not given by user (e.g. for
        reduce tasks)
        """
        return self._jsc.sc().defaultParallelism()

    @property
    def defaultMinPartitions(self):
        """
        Default min number of partitions for Hadoop RDDs when not given by user
        """
        return self._jsc.sc().defaultMinPartitions()

    def stop(self):
        """
        Shut down the SparkContext.
        """
        if getattr(self, "_jsc", None):
            try:
                self._jsc.stop()
            except Py4JError:
                # Case: SPARK-18523
                warnings.warn(
                    'Unable to cleanly shutdown Spark JVM process.'
                    ' It is possible that the process has crashed,'
                    ' been killed or may also be in a zombie state.',
                    RuntimeWarning
                )
                pass
            finally:
                self._jsc = None
        if getattr(self, "_accumulatorServer", None):
            self._accumulatorServer.shutdown()
            self._accumulatorServer = None
        with SparkContext._lock:
            SparkContext._active_spark_context = None

    def emptyRDD(self):
        """
        Create an RDD that has no partitions or elements.
        """
        return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
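
    # Usage sketch: an empty RDD is handy as the identity element when folding
    # a list of RDDs together with union().
    #
    #     from functools import reduce
    #     rdds = [sc.parallelize([i]) for i in range(3)]
    #     combined = reduce(lambda a, b: a.union(b), rdds, sc.emptyRDD())
    #     sorted(combined.collect())   # [0, 1, 2]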

    def range(self, start, end=None, step=1, numSlices=None):
        """
        Create a new RDD of int containing elements from `start` to `end`
        (exclusive), increased by `step` every element. Can be called the same
        way as python's built-in range() function. If called with a single argument,
        the argument is interpreted as `end`, and `start` is set to 0.

        :param start: the start value
        :param end: the end value (exclusive)
        :param step: the incremental step (default: 1)
        :param numSlices: the number of partitions of the new RDD
        :return: An RDD of int

        >>> sc.range(5).collect()
        [0, 1, 2, 3, 4]
        >>> sc.range(2, 4).collect()
        [2, 3]
        >>> sc.range(1, 7, 2).collect()
        [1, 3, 5]
        """
        if end is None:
            end = start
            start = 0

        return self.parallelize(xrange(start, end, step), numSlices)

    def parallelize(self, c, numSlices=None):
        """
        Distribute a local Python collection to form an RDD. Using xrange
        is recommended if the input represents a range for performance.

        >>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
        [[0], [2], [3], [4], [6]]
        >>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
        [[], [0], [], [2], [4]]
        """
        numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
        if isinstance(c, xrange):
            size = len(c)
            if size == 0:
                return self.parallelize([], numSlices)
            step = c[1] - c[0] if size > 1 else 1
            start0 = c[0]

            def getStart(split):
                return start0 + int((split * size / numSlices)) * step

            def f(split, iterator):
                return xrange(getStart(split), getStart(split + 1), step)

            return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
        # Calling the Java parallelize() method with an ArrayList is too slow,
        # because it sends O(n) Py4J commands.  As an alternative, serialized
        # objects are written to a file and loaded through textFile().
        tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
        try:
            # Make sure we distribute data evenly if it's smaller than self.batchSize
            if "__len__" not in dir(c):
                c = list(c)    # Make it a list so we can compute its length
            batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
            serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
            serializer.dump_stream(c, tempFile)
            tempFile.close()
            readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
            jrdd = readRDDFromFile(self._jsc, tempFile.name, numSlices)
        finally:
            # readRDDFromFile eagerly reads the file so we can delete right after.
            os.unlink(tempFile.name)
        return RDD(jrdd, self, serializer)

    def pickleFile(self, name, minPartitions=None):
        """
        Load an RDD previously saved using L{RDD.saveAsPickleFile} method.

        >>> tmpFile = NamedTemporaryFile(delete=True)
        >>> tmpFile.close()
        >>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
        >>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(self._jsc.objectFile(name, minPartitions), self)

    @ignore_unicode_prefix
    def textFile(self, name, minPartitions=None, use_unicode=True):
        """
        Read a text file from HDFS, a local file system (available on all
        nodes), or any Hadoop-supported file system URI, and return it as an
        RDD of Strings.

        If use_unicode is False, the strings will be kept as `str` (encoding
        as `utf-8`), which is faster and smaller than unicode. (Added in
        Spark 1.2)

        >>> path = os.path.join(tempdir, "sample-text.txt")
        >>> with open(path, "w") as testFile:
        ...    _ = testFile.write("Hello world!")
        >>> textFile = sc.textFile(path)
        >>> textFile.collect()
        [u'Hello world!']
        """
        minPartitions = minPartitions or min(self.defaultParallelism, 2)
        return RDD(self._jsc.textFile(name, minPartitions), self,
                   UTF8Deserializer(use_unicode))

    @ignore_unicode_prefix
    def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
        """
        Read a directory of text files from HDFS, a local file system
        (available on all nodes), or any  Hadoop-supported file system
        URI. Each file is read as a single record and returned in a
        key-value pair, where the key is the path of each file, the
        value is the content of each file.

        If use_unicode is False, the strings will be kept as `str` (encoding
        as `utf-8`), which is faster and smaller than unicode. (Added in
        Spark 1.2)

        For example, if you have the following files::

          hdfs://a-hdfs-path/part-00000
          hdfs://a-hdfs-path/part-00001
          ...
          hdfs://a-hdfs-path/part-nnnnn

        Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
        then C{rdd} contains::

          (a-hdfs-path/part-00000, its content)
          (a-hdfs-path/part-00001, its content)
          ...
          (a-hdfs-path/part-nnnnn, its content)

        .. note:: Small files are preferred, as each file will be loaded
            fully in memory.

        >>> dirPath = os.path.join(tempdir, "files")
        >>> os.mkdir(dirPath)
        >>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
        ...    _ = file1.write("1")
        >>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
        ...    _ = file2.write("2")
        >>> textFiles = sc.wholeTextFiles(dirPath)
        >>> sorted(textFiles.collect())
        [(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
                   PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))

    def binaryFiles(self, path, minPartitions=None):
        """
        .. note:: Experimental

        Read a directory of binary files from HDFS, a local file system
        (available on all nodes), or any Hadoop-supported file system URI
        as a byte array. Each file is read as a single record and returned
        in a key-value pair, where the key is the path of each file, the
        value is the content of each file.

        .. note:: Small files are preferred; large files are also allowed, but
            may cause poor performance.
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(self._jsc.binaryFiles(path, minPartitions), self,
                   PairDeserializer(UTF8Deserializer(), NoOpSerializer()))

    def binaryRecords(self, path, recordLength):
        """
        .. note:: Experimental

        Load data from a flat binary file, assuming each record is a set of numbers
        with the specified numerical format (see ByteBuffer), and the number of
        bytes per record is constant.

        :param path: Directory to the input data files
        :param recordLength: The length at which to split the records
        """
        return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())

    def _dictToJavaMap(self, d):
        jm = self._jvm.java.util.HashMap()
        if not d:
            d = {}
        for k, v in d.items():
            jm[k] = v
        return jm

    def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
                     valueConverter=None, minSplits=None, batchSize=0):
        """
        Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is as follows:

            1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
               and value Writable classes
            2. Serialization is attempted via Pyrolite pickling
            3. If this fails, the fallback is to call 'toString' on each key and value
            4. C{PickleSerializer} is used to deserialize pickled objects on the Python side

        :param path: path to sequence file
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param minSplits: minimum splits in dataset
               (default min(2, sc.defaultParallelism))
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        minSplits = minSplits or min(self.defaultParallelism, 2)
        jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
                                                keyConverter, valueConverter, minSplits, batchSize)
        return RDD(jrdd, self)

    def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
                         valueConverter=None, conf=None, batchSize=0):
        """
        Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is the same as for sc.sequenceFile.

        A Hadoop configuration can be passed in as a Python dict. This will be converted into a
        Configuration in Java.

        :param path: path to Hadoop file
        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
                                                    valueClass, keyConverter, valueConverter,
                                                    jconf, batchSize)
        return RDD(jrdd, self)

    def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
                        valueConverter=None, conf=None, batchSize=0):
        """
        Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
        Hadoop configuration, which is passed in as a Python dict.
        This will be converted into a Configuration in Java.
        The mechanism is the same as for sc.sequenceFile.

        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
                                                   valueClass, keyConverter, valueConverter,
                                                   jconf, batchSize)
        return RDD(jrdd, self)

    def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
                   valueConverter=None, conf=None, batchSize=0):
        """
        Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is the same as for sc.sequenceFile.

        A Hadoop configuration can be passed in as a Python dict. This will be converted into a
        Configuration in Java.

        :param path: path to Hadoop file
        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapred.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
                                              valueClass, keyConverter, valueConverter,
                                              jconf, batchSize)
        return RDD(jrdd, self)

    def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
                  valueConverter=None, conf=None, batchSize=0):
        """
        Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
        Hadoop configuration, which is passed in as a Python dict.
        This will be converted into a Configuration in Java.
        The mechanism is the same as for sc.sequenceFile.

        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapred.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
                                             valueClass, keyConverter, valueConverter,
                                             jconf, batchSize)
        return RDD(jrdd, self)

    def _checkpointFile(self, name, input_deserializer):
        jrdd = self._jsc.checkpointFile(name)
        return RDD(jrdd, self, input_deserializer)

    @ignore_unicode_prefix
    def union(self, rdds):
        """
        Build the union of a list of RDDs.

        This supports unions() of RDDs with different serialized formats,
        although this forces them to be reserialized using the default
        serializer:

        >>> path = os.path.join(tempdir, "union-text.txt")
        >>> with open(path, "w") as testFile:
        ...    _ = testFile.write("Hello")
        >>> textFile = sc.textFile(path)
        >>> textFile.collect()
        [u'Hello']
        >>> parallelized = sc.parallelize(["World!"])
        >>> sorted(sc.union([textFile, parallelized]).collect())
        [u'Hello', 'World!']
        """
        first_jrdd_deserializer = rdds[0]._jrdd_deserializer
        if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
            rdds = [x._reserialize() for x in rdds]
        first = rdds[0]._jrdd
        rest = [x._jrdd for x in rdds[1:]]
        return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)

    def broadcast(self, value):
        """
        Broadcast a read-only variable to the cluster, returning a
        L{Broadcast<pyspark.broadcast.Broadcast>}
        object for reading it in distributed functions. The variable will
        be sent to each cluster only once.
        """
        return Broadcast(self, value, self._pickled_broadcast_vars)

    def accumulator(self, value, accum_param=None):
        """
        Create an L{Accumulator} with the given initial value, using a given
        L{AccumulatorParam} helper object to define how to add values of the
        data type if provided. Default AccumulatorParams are used for integers
        and floating-point numbers if you do not provide one. For other types,
        a custom AccumulatorParam can be used.
        """
        if accum_param is None:
            if isinstance(value, int):
                accum_param = accumulators.INT_ACCUMULATOR_PARAM
            elif isinstance(value, float):
                accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
            elif isinstance(value, complex):
                accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
            else:
                raise TypeError("No default accumulator param for type %s" % type(value))
        SparkContext._next_accum_id += 1
        return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)

    def addFile(self, path, recursive=False):
        """
        Add a file to be downloaded with this Spark job on every node.
        The C{path} passed can be either a local file, a file in HDFS
        (or other Hadoop-supported filesystems), or an HTTP, HTTPS or
        FTP URI.

        To access the file in Spark jobs, use
        L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
        filename to find its download location.

        A directory can be given if the recursive option is set to True.
        Currently directories are only supported for Hadoop-supported filesystems.

        >>> from pyspark import SparkFiles
        >>> path = os.path.join(tempdir, "test.txt")
        >>> with open(path, "w") as testFile:
        ...    _ = testFile.write("100")
        >>> sc.addFile(path)
        >>> def func(iterator):
        ...    with open(SparkFiles.get("test.txt")) as testFile:
        ...        fileVal = int(testFile.readline())
        ...        return [x * fileVal for x in iterator]
        >>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
        [100, 200, 300, 400]
        """
        self._jsc.sc().addFile(path, recursive)

    def addPyFile(self, path):
        """
        Add a .py or .zip dependency for all tasks to be executed on this
        SparkContext in the future.  The C{path} passed can be either a local
        file, a file in HDFS (or other Hadoop-supported filesystems), or an
        HTTP, HTTPS or FTP URI.
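
        A hypothetical call (the archive path is illustrative, so it is not run
        as a doctest):

        >>> sc.addPyFile("/path/to/deps.zip")  # doctest: +SKIP

        After this call, modules packaged in ``deps.zip`` can be imported by
        code running in tasks.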
        """
        self.addFile(path)
        (dirname, filename) = os.path.split(path)  # dirname may be directory or HDFS/S3 prefix
        if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
            self._python_includes.append(filename)
            # for tests in local mode
            sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
        if sys.version > '3':
            import importlib
            importlib.invalidate_caches()

    def setCheckpointDir(self, dirName):
        """
        Set the directory under which RDDs are going to be checkpointed. The
        directory must be an HDFS path if running on a cluster.
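
        A minimal sketch using the ``tempdir`` available to these doctests:

        >>> sc.setCheckpointDir(os.path.join(tempdir, "checkpoints"))
        >>> rdd = sc.parallelize(range(5))
        >>> rdd.checkpoint()
        >>> rdd.count()
        5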
        """
        self._jsc.sc().setCheckpointDir(dirName)

    def _getJavaStorageLevel(self, storageLevel):
        """
        Returns a Java StorageLevel based on a pyspark.StorageLevel.
        """
        if not isinstance(storageLevel, StorageLevel):
            raise Exception("storageLevel must be of type pyspark.StorageLevel")

        newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
        return newStorageLevel(storageLevel.useDisk,
                               storageLevel.useMemory,
                               storageLevel.useOffHeap,
                               storageLevel.deserialized,
                               storageLevel.replication)

    def setJobGroup(self, groupId, description, interruptOnCancel=False):
        """
        Assigns a group ID to all the jobs started by this thread until the group ID is set to a
        different value or cleared.

        Often, a unit of execution in an application consists of multiple Spark actions or jobs.
        Application programmers can use this method to group all those jobs together and give a
        group description. Once set, the Spark web UI will associate such jobs with this group.

        The application can use L{SparkContext.cancelJobGroup} to cancel all
        running jobs in this group.

        >>> import threading
        >>> from time import sleep
        >>> result = "Not Set"
        >>> lock = threading.Lock()
        >>> def map_func(x):
        ...     sleep(100)
        ...     raise Exception("Task should have been cancelled")
        >>> def start_job(x):
        ...     global result
        ...     try:
        ...         sc.setJobGroup("job_to_cancel", "some description")
        ...         result = sc.parallelize(range(x)).map(map_func).collect()
        ...     except Exception as e:
        ...         result = "Cancelled"
        ...     lock.release()
        >>> def stop_job():
        ...     sleep(5)
        ...     sc.cancelJobGroup("job_to_cancel")
        >>> suppress = lock.acquire()
        >>> suppress = threading.Thread(target=start_job, args=(10,)).start()
        >>> suppress = threading.Thread(target=stop_job).start()
        >>> suppress = lock.acquire()
        >>> print(result)
        Cancelled

        If interruptOnCancel is set to true for the job group, then job cancellation will result
        in Thread.interrupt() being called on the job's executor threads. This is useful to help
        ensure that the tasks are actually stopped in a timely manner, but is off by default due
        to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
        """
        self._jsc.setJobGroup(groupId, description, interruptOnCancel)

    def setLocalProperty(self, key, value):
        """
        Set a local property that affects jobs submitted from this thread, such as the
        Spark fair scheduler pool.
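
        For example, to route jobs submitted from this thread to a fair
        scheduler pool (the pool name is illustrative):

        >>> sc.setLocalProperty("spark.scheduler.pool", "production")
        >>> print(sc.getLocalProperty("spark.scheduler.pool"))
        production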
        """
        self._jsc.setLocalProperty(key, value)

    def getLocalProperty(self, key):
        """
        Get a local property set in this thread, or ``None`` if it is missing. See
        L{setLocalProperty}.
        """
        return self._jsc.getLocalProperty(key)

    def setJobDescription(self, value):
        """
        Set a human-readable description of the current job.
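
        For instance (the description text is illustrative):

        >>> sc.setJobDescription("nightly aggregation job")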
        """
        self._jsc.setJobDescription(value)

    def sparkUser(self):
        """
        Get SPARK_USER for the user who is running this SparkContext.
        """
        return self._jsc.sc().sparkUser()

    def cancelJobGroup(self, groupId):
        """
        Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
        for more information.
        """
        self._jsc.sc().cancelJobGroup(groupId)

    def cancelAllJobs(self):
        """
        Cancel all jobs that have been scheduled or are running.
        """
        self._jsc.sc().cancelAllJobs()

    def statusTracker(self):
        """
        Return a :class:`StatusTracker` object for monitoring job and stage progress.
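
        A brief sketch (``getActiveJobsIds`` is one of the tracker's query
        methods):

        >>> tracker = sc.statusTracker()
        >>> isinstance(tracker.getActiveJobsIds(), list)
        True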
        """
        return StatusTracker(self._jsc.statusTracker())

    def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
        """
        Executes the given partitionFunc on the specified set of partitions,
        returning the result as an array of elements.

        If 'partitions' is not specified, this will run over all partitions.

        >>> myRDD = sc.parallelize(range(6), 3)
        >>> sc.runJob(myRDD, lambda part: [x * x for x in part])
        [0, 1, 4, 9, 16, 25]

        >>> myRDD = sc.parallelize(range(6), 3)
        >>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
        [0, 1, 16, 25]
        """
        if partitions is None:
            partitions = range(rdd._jrdd.partitions().size())

        # Implementation note: This is implemented as a mapPartitions followed
        # by runJob() in order to avoid having to pass a Python lambda into
        # SparkContext#runJob.
        mappedRDD = rdd.mapPartitions(partitionFunc)
        port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
        return list(_load_from_socket(port, mappedRDD._jrdd_deserializer))

    def show_profiles(self):
        """ Print the profile stats to stdout """
        self.profiler_collector.show_profiles()

    def dump_profiles(self, path):
        """ Dump the profile stats into directory `path`
        """
        self.profiler_collector.dump_profiles(path)

    def getConf(self):
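        """
        Return a copy of this SparkContext's configuration as a
        :class:`SparkConf`; modifying the copy does not affect the running
        context.

        >>> conf = sc.getConf()
        >>> "spark.app.name" in dict(conf.getAll())
        True
        """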
        conf = SparkConf()
        conf.setAll(self._conf.getAll())
        return conf
Exemplo n.º 51
 @classmethod
 def setUpClass(cls):
     # Disable console progress bars so the test output stays clean.
     conf = SparkConf()
     conf.set('spark.ui.showConsoleProgress', 'false')
     # EsSparkContext comes from the pyspark-elastic package used by these tests.
     cls.sc = EsSparkContext(conf=conf.setAppName("PySpark Elastic Test"))