def to_spark():
    st = time.time()

    # Get or create the shared SparkSession; it is used as `spark` below.
    spark = SparkSession \
        .builder \
        .enableHiveSupport() \
        .getOrCreate()

    df: DataFrame = spark.read.json("json/city_json.json")
    df.show()

    df.createOrReplaceTempView("table_1")

    df2: DataFrame = spark.sql("SELECT result from table_1")

    df2.show()

    rdd: RDD = df2.toJSON()
    print(rdd.count())

    first_elem: str = rdd.first()

    print(first_elem[0:5000:1], len(first_elem))
    dict_root: dict = json.loads(first_elem)
    print()
    print(dict_root["result"].keys())
    print(dict_root["result"]["records"].pop())
    cities: List[Dict] = dict_root["result"]["records"]  # one dict per city record
    citiesDF: DataFrame = spark.createDataFrame(data=cities)
    citiesDF.printSchema()
    citiesDF.show(truncate=False)
    from datetime import date
    today = date.today()
    print("Today's date:", today)
    print(type(citiesDF.filter(citiesDF.Date >= date.today())))
    filteresDF: DataFrame = citiesDF.filter(citiesDF.Date >= "2021-01-09")
    filteresDF.show()
    # print(type(filteresDF.collect()))
    final_result: List[Dict] = filteresDF.collect()

    Constants.db.update({"cities_2": final_result})  # load to firebase

    # from testsAndOthers.data_types_and_structures import DataTypesHandler, PrintForm
    # DataTypesHandler.print_data_recursively(data=json_count_names, print_dict=PrintForm.PRINT_DICT.value)

    logging.debug(f"spark total time: {time.time() - st} seconds")
Example #2
numIterations = 1000
model = SVMWithSGD.train(train, numIterations)


def Getpoint(point):
    # Format "<raw score> <true label>" for one LabeledPoint.
    score = model.predict(point.features)
    return str(score) + " " + str(point.label)


model.clearThreshold()  # make predict() return raw scores instead of 0/1 labels
scoreAndLabels = test.map(lambda point: Getpoint(point))

# scoreAndLabels.foreach(lambda x : print(x))
# model.setThreshold(0.0)
# scoreAndLabels.foreach(lambda x : print(x))

rebuyRDD = scoreAndLabels.map(lambda x: x.split(" "))
schema = StructType([
    StructField("score", StringType(), True),
    StructField("label", StringType(), True)
])
rowRDD = rebuyRDD.map(lambda p: Row(p[0].strip(), p[1].strip()))
rebuyDF = spark.createDataFrame(rowRDD, schema)
prop = {}
prop['user'] = '******'
prop['password'] = '******'
prop['driver'] = "com.mysql.jdbc.Driver"
rebuyDF.write.jdbc("jdbc:mysql://localhost:3306/dbtaobao", 'dbtaobao.rebuy',
                   'append', prop)
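
The snippet above leaves the raw score/label pairs unevaluated. A minimal evaluation sketch, assuming `test` is an RDD of LabeledPoint and the threshold has been cleared so model.predict() returns raw scores (as above):

from pyspark.mllib.evaluation import BinaryClassificationMetrics

score_and_labels = test.map(lambda p: (float(model.predict(p.features)), p.label))
metrics = BinaryClassificationMetrics(score_and_labels)
print("Area under ROC:", metrics.areaUnderROC)
print("Area under PR: ", metrics.areaUnderPR)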
def to_spark():
    st = time.time()



    # json_count_names: dict = Spark_handler.pass_to_spark(
    #     file_path=f"{HDFS_handler.DEFAULT_CLUSTER_PATH}{HDFS_handler.HADOOP_USER}/{CITY_JSON}",
    #     process_fn=process_data
    # )

    spark = SparkSession \
        .builder \
        .enableHiveSupport() \
        .getOrCreate()

    # print(spark.read.json("json/city_json.json"))
    df: DataFrame = spark.read.json("json/city_json.json")
    # df.printSchema()
    df.show()
    # cities: Column = df.result.records
    # cities: Column = df["result"]["records"]
    #
    # # df.select(df.result.records.getField("City_Name")).show()
    # df.select(cities.getField("City_Name")).show()
    # df.select(cities("City_Name")).show()
    df.createOrReplaceTempView("table_1")

    df2: DataFrame = spark.sql("SELECT result from table_1")
    # print(df2.collect())
    # df2.printSchema()
    # df2.createOrReplaceTempView("table_2")
    #
    # df3: DataFrame = spark.sql("SELECT result FROM table_2 WHERE result='records'")
    # df3.printSchema()
    # df3.show()
    # # print(df3.collect())
    # # print(df3.select("records"))
    # df3.createOrReplaceTempView("table_3")
    #
    # cities_records: DataFrame = spark.sql("SELECT * from table_3")
    # cities_records.printSchema()
    df2.show()
    # results: list = df2.collect()
    # res: Row = results.pop()
    # print(res)
    rdd: RDD = df2.toJSON()
    print(rdd.count())
    # print(type(rdd.first()))
    first_elem: str = rdd.first()
    # rdd.flatMapValues()
    print(first_elem[0:5000:1], len(first_elem))
    dict_root: dict = json.loads(first_elem)
    print()
    print(dict_root["result"].keys())
    print(dict_root["result"]["records"].pop())
    cities: List[Dict] = dict_root["result"]["records"]  # one dict per city record
    citiesDF: DataFrame = spark.createDataFrame(data=cities)
    citiesDF.printSchema()
    citiesDF.show(truncate=False)
    from datetime import date
    today = date.today()
    print("Today's date:", today)
    print(type(citiesDF.filter(citiesDF.Date >= date.today())))
    filteresDF: DataFrame = citiesDF.filter(citiesDF.Date >= "2021-01-09")
    filteresDF.show()


    # from testsAndOthers.data_types_and_structures import DataTypesHandler, PrintForm
    # DataTypesHandler.print_data_recursively(data=json_count_names, print_dict=PrintForm.PRINT_DICT.value)

    logging.debug(f"spark total time: {time.time() - st} seconds")
Example #4
    #   B = broadcast(B)

    B.registerTempTable("B")
    return spark.sql("""
       SELECT
           A.row row,
           B.col col,
           SUM(A.val * B.val) val
       FROM
           A
       JOIN B ON A.col = B.row
       GROUP BY A.row, B.col
       """)


COO_MATRIX_SCHEMA = StructType([
    StructField('row', LongType()),
    StructField('col', LongType()),
    StructField('val', DoubleType())
])

mat_a = spark.sparkContext.parallelize([(0, 1, 1.0), (0, 4, 9.0), (1, 2, 3.0),
                                        (2, 3, 2.0)])
mat_b = spark.sparkContext.parallelize([(0, 0, 1.0), (2, 1, 7.0), (3, 2, 2.0)])

A = spark.createDataFrame(mat_a, COO_MATRIX_SCHEMA)
B = spark.createDataFrame(mat_b, COO_MATRIX_SCHEMA)

result = multiply_matrices(spark, A, B).collect()
print(result)
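
As a cross-check (not part of the original example), the same sparse product can be computed with MLlib's distributed matrices: wrap the (row, col, val) triples in CoordinateMatrix objects, declare matching dimensions, and multiply their BlockMatrix forms.

from pyspark.mllib.linalg.distributed import CoordinateMatrix, MatrixEntry

coord_a = CoordinateMatrix(mat_a.map(lambda t: MatrixEntry(*t)), numRows=3, numCols=5)
coord_b = CoordinateMatrix(mat_b.map(lambda t: MatrixEntry(*t)), numRows=5, numCols=3)
product = coord_a.toBlockMatrix().multiply(coord_b.toBlockMatrix())
print(product.toCoordinateMatrix().entries.collect())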
Example #5
from pyspark.ml.fpm import FPGrowth
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame([(110, ["A", "B", "C", "F", "G", "H"]),
                            (111, ["A", "B", "C", "E", "G"]),
                            (112, ["C", "E", "F", "H"]),
                            (113, ["A", "B", "C", "G", "H"]),
                            (114, ["C", "D", "E", "H"]),
                            (115, ["B", "C", "E", "G"]),
                            (116, ["A", "B", "C", "D", "G", "H"]),
                            (117, ["B", "C", "E", "G"]),
                            (118, ["A", "C", "G", "H"]),
                            (119, ["A", "B", "C", "D", "E", "G", "H"])],
                           ["id", "items"])

if __name__ == "__main__":
    fpGrowth = FPGrowth(itemsCol="items", minSupport=.0, minConfidence=.0)
    model = fpGrowth.fit(df)

    # Display frequent itemsets.
    model.freqItemsets.show()

    # Display generated association rules.
    model.associationRules.show()

    # transform examines the input items against all the association rules and
    # summarizes the consequents as the prediction.
    model.transform(df).show()
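
A small follow-up sketch: associationRules exposes antecedent, consequent and confidence (plus lift and support in newer Spark versions), so rules can be filtered before use, and transform() accepts unseen baskets with the same items column. The thresholds and ids here are illustrative.

from pyspark.sql.functions import col, size

strong_rules = model.associationRules.filter(
    (col("confidence") >= 0.8) & (size(col("antecedent")) >= 2))
strong_rules.show(truncate=False)

new_baskets = spark.createDataFrame([(200, ["A", "B"]), (201, ["E"])], ["id", "items"])
model.transform(new_baskets).show(truncate=False)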
Example #6
    # df = spark.createDataFrame([
    #     (110, ["A", "B", "C", "F", "G", "H"]),
    #     (111, ["A", "B", "C", "E", "G"]),
    #     (112, ["C", "E", "F", "H"]),
    #     (113, ["A", "B", "C", "G", "H"]),
    #     (114, ["C", "D", "E", "H"]),
    #     (115, ["B", "C", "E", "G"]),
    #     (116, ["A", "B", "C", "D", "G", "H"]),
    #     (117, ["B", "C", "E", "G"]),
    #     (118, ["A", "C", "G", "H"]),
    #     (119, ["A", "B", "C", "D", "E", "G", "H"])
    # ], ["id", "items"])

    print('Count of "P":', count_of_element(itemSetList, 'P'))

    df = spark.createDataFrame([(x[0], x[1:]) for x in itemSetList],
                               ["id", "items"])

    fpGrowth = FPGrowth(itemsCol="items", minSupport=.6, minConfidence=.0)
    model = fpGrowth.fit(df)

    # Display frequent itemsets.
    model.freqItemsets.show()

    # Display generated association rules.
    # model.associationRules.show()

    # transform examines the input items against all the association rules and
    # summarizes the consequents as the prediction.
    model.transform(df).show()
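
itemSetList and count_of_element are not defined in the snippet above; judging from how they are used ((x[0], x[1:]) per row and counting one item), a plausible shape is sketched below with a tiny illustrative list, purely as an assumption:

from typing import List, Sequence

itemSetList: List[Sequence] = [  # illustrative data: id followed by that basket's items
    [110, "A", "B", "C"],
    [111, "A", "C", "P"],
    [112, "B", "P"],
]

def count_of_element(item_sets: List[Sequence], element: str) -> int:
    # Count the baskets (ignoring the leading id) that contain `element`.
    return sum(1 for row in item_sets if element in row[1:])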
def to_spark_direct_upside_down(cities: dict):
    st = time.time()

    spark = SparkSession \
        .builder \
        .enableHiveSupport() \
        .getOrCreate()

    # cities: List[Dict[str, str, str, str, str, str, str, str, str, int]] = dict_root["result"]["records"]
    citiesDF: DataFrame = spark.createDataFrame(data=cities)
    citiesDF.printSchema()
    citiesDF.show(truncate=False)
    # from datetime import date
    # today = date.today()
    # print("Today's date:", today)
    # print(type(citiesDF.filter(citiesDF.Date >= date.today())))
    # filteresDF: DataFrame = citiesDF.filter(citiesDF.Date >= "2021-01-09")
    from datetime import datetime, timedelta
    result_length: int = 0
    day: datetime = datetime.now()  # today
    while result_length == 0:
        # print(type(day))
        day_str: str = datetime.strftime(day, '%Y-%m-%d')
        filteresDF: DataFrame = citiesDF.filter(citiesDF.Date >= day_str)
        filteresDF.show()
        filteresDF.describe().show()
        schema = filteresDF.columns
        logging.debug(schema)
        # print(type(filteresDF.collect()))
        final_result: List[Dict] = filteresDF.collect()
        # print(filteresDF.select("*"))
        # print(datetime.strftime(day, '%Y-%m-%d'), type(datetime.strftime(day, '%Y-%m-%d')))
        logging.debug(f'{day_str} {type(day_str)}')
        logging.debug("Empty RDD: %s" % (final_result.__len__() == 0))
        day = day - timedelta(1)  # timedelta() indicates how many days ago
        result_length = final_result.__len__()
    # print(type(filteresDF.rdd.mapValues(lambda city: (city["City_Name"], city["City_Code"]))))
    # print(filteresDF.rdd.mapValues(lambda city: (city["City_Name"], city["City_Code"])).collectAsMap())
    # print(filteresDF.select(explode(filteresDF.rdd.collectAsMap())))
    # print(explode(filteresDF))
    # print(filteresDF.toPandas().to_dict())
    # my_dict2 = {y: x for x, y in filteresDF.toPandas().to_dict().items()}
    # print(my_dict2)
    # rdd: pyspark.rdd.RDD = sc.parallelize(list)
    def append_json(row: Row):
        # for item in row.asDict(recursive=True).items():
        # my_dict2 = {y: x for x, y in row.asDict(recursive=True).items()}
        # print(my_dict2)
        # print(row.asDict(recursive=True))
        return {row["City_Name"]: row.asDict()}  # {"counter": row.asDict()}
        # Constants.json_rows.append({
        #     "City_Code": row["City_Code"],
        #     "City_Name": row.City_Code,
        #     "Cumulated_deaths": row[2]
        # })

    # Note: this foreach discards append_json's return value; the actual mapping
    # happens in the rdd.map(...) call below.
    filteresDF.foreach(append_json)
    cities_final_df: DataFrame = spark.createDataFrame(
        data=filteresDF.rdd.map(append_json).collect())
    # cities_final_df.show()
    #               print(cities_final_df.toPandas().to_dict())
    # print(Constants.json_rows)
    # filteresDF.show()
    # filteresDF.foreachPartition(lambda x: print(x))
    #  metric: Column = create_map(filteresDF.columns)
    #  metric: Column = create_map([
    #      filteresDF.City_Name,
    #      [
    #          filteresDF.City_Code,
    #          filteresDF.Cumulated_deaths,
    #          filteresDF.Cumulated_number_of_diagnostic_tests,
    #          filteresDF.Cumulated_number_of_tests,
    #          filteresDF.Cumulated_recovered,
    #          filteresDF.Cumulated_vaccinated,
    #          filteresDF["Cumulative_verified_cases"],
    #          filteresDF.Date,
    #          filteresDF["_id"]
    #      ]
    #  ])
    #  print(filteresDF.select(explode(metric)))
    #  filteresDF.select(explode(metric)).show()
    #  filteresDF.select(create_map(filteresDF.columns).alias("map")).show()

    Constants.db.update({
        "cities_3": {
            "schema": schema,
            "data": final_result,
            "filteresDF": filteresDF.toJSON().collect(),
            # "ok_3": json.loads(str(dict({"data": filteresDF.toJSON().collect()}))),
            # "ok_5": json.load(filteresDF.toJSON().collect()),
            # "ok": filteresDF.toJSON().keys(),
            # "ok_2": filteresDF.toJSON().collectAsMap(),
            "ok": filteresDF.toPandas().to_dict(),
            "shit": cities_final_df.toPandas().to_dict()
        }
    })  # load to firebase

    Constants.db.update({"cities_final": cities_final_df.toPandas().to_dict()
                         })  # load to firebase
    # ---------------------------------------------------------------------------------
    cities_final_df.summary()

    # Constants.db.update(
    #     {
    #         "israel": {
    #             # "sum": cities_final_df.rdd.sum().__str__(),
    #             "summarize": filteresDF.describe(filteresDF.columns).toPandas().to_dict(),
    #             # "summarize_2": {
    #             #     "City_Code": filteresDF.describe(filteresDF.City_Code).toPandas().to_dict(),
    #             #     "City_Name": filteresDF.describe(filteresDF.City_Name).toPandas().to_dict(),
    #             #     "Cumulated_deaths": filteresDF.describe(filteresDF.Cumulated_deaths).toPandas().to_dict(),
    #             #     "Cumulated_number_of_diagnostic_tests": filteresDF
    #             #         .describe(filteresDF.Cumulated_number_of_diagnostic_tests).toPandas().to_dict(),
    #             #     "Cumulated_number_of_tests": filteresDF
    #             #         .describe(filteresDF.Cumulated_number_of_tests).toPandas().to_dict(),
    #             #     "Cumulated_recovered": filteresDF
    #             #         .describe(filteresDF.Cumulated_recovered).toPandas().to_dict(),
    #             #     "Cumulated_vaccinated": filteresDF
    #             #         .describe(filteresDF.Cumulated_vaccinated).toPandas().to_dict(),
    #             #     "Cumulative_verified_cases": filteresDF
    #             #         .describe(filteresDF.Cumulative_verified_cases).toPandas().to_dict(),
    #             #     "Date": filteresDF.describe(filteresDF.Date).toPandas().to_dict(),
    #             #     "_id": filteresDF.describe(filteresDF["_id"]).toPandas().to_dict(),
    #             # }
    #         }
    #     }
    # )  # load to firebase

    # acc_vaccinated = spark.sparkContext.accumulator(0)
    # acc_vaccinated_less_than_15 = spark.sparkContext.accumulator(0)
    #
    # def count_israel_total(row: Row, acc_vaccinated_internal: Accumulator,
    #                        acc_vaccinated_less_than_15_internal: Accumulator):
    #     # print(row.Cumulated_vaccinated)
    #     if row.Cumulated_vaccinated.isdigit():
    #         acc_vaccinated_internal += int(row.Cumulated_vaccinated)
    #     else:
    #         print(row.Cumulated_vaccinated, type(row.Cumulated_vaccinated))
    #         acc_vaccinated_less_than_15_internal += 1
    #
    # filteresDF.foreach(lambda row: count_israel_total(row, acc_vaccinated, acc_vaccinated_less_than_15))
    # print(acc_vaccinated.value)
    # print(acc_vaccinated_less_than_15.value)
    #
    # Constants.db.update(
    #     {
    #         "israel": {
    #             "Cumulated_vaccinated": acc_vaccinated.value,
    #             "Cumulated_vaccinated_<15": acc_vaccinated_less_than_15.value
    #         }
    #     }
    # )  # load to firebase

    accum_dict: Dict[str, Dict[str, Accumulator]] = {
        "vaccinated": {
            "Cumulated_vaccinated_total": spark.sparkContext.accumulator(0),
            "Cumulated_vaccinated_<15": spark.sparkContext.accumulator(0)
        },
        "dead": {
            "Cumulated_deaths_total": spark.sparkContext.accumulator(0),
            "Cumulated_deaths_<15": spark.sparkContext.accumulator(0)
        },
        "diagnostic_tests": {
            "Cumulated_number_of_diagnostic_tests_total": spark.sparkContext.accumulator(0),
            "Cumulated_number_of_diagnostic_tests_<15": spark.sparkContext.accumulator(0)
        },
        "tests": {
            "Cumulated_number_of_tests_total": spark.sparkContext.accumulator(0),
            "Cumulated_number_of_tests_<15": spark.sparkContext.accumulator(0)
        },
        "recovered": {
            "Cumulated_recovered_total": spark.sparkContext.accumulator(0),
            "Cumulated_recovered_<15": spark.sparkContext.accumulator(0)
        },
        "Cumulative_verified_cases": {
            "Cumulative_verified_cases_total": spark.sparkContext.accumulator(0),
            "Cumulative_verified_cases_<15": spark.sparkContext.accumulator(0)
        }
    }

    value_accu = spark.sparkContext.accumulator(0)

    def count_israel_total(row: Row, acc_vaccinated_internal: Accumulator,
                           acc_vaccinated_less_than_15_internal: Accumulator):
        # print(row.Cumulated_vaccinated)
        if row.Cumulated_vaccinated.isdigit():
            acc_vaccinated_internal += int(row.Cumulated_vaccinated)
        else:
            print(row.Cumulated_vaccinated, type(row.Cumulated_vaccinated))
            acc_vaccinated_less_than_15_internal += 1

    def switch_accu(key_dict: dict, row: Row, keyname):
        print(
            "----------------------start---------------------------------------"
        )
        print(key_dict, keyname)
        try:
            less_15 = key_dict.popitem()
            cumulated = key_dict.popitem()
            count_israel_total(row, cumulated[1], less_15[1])

            # accum_dict[keyname][cumulated[0]] = cumulated[1].value
            # accum_dict[keyname][less_15[0]] = less_15[1].value
            # print(cumulated[1].value)
            # print(less_15[1].value)
            try:
                print(accum_dict[keyname][cumulated[0]])
            finally:
                print(
                    "---------------------end----------------------------------------"
                )
            # for key2 in accum_dict[keyname].keys():
            #     print(accum_dict[keyname], accum_dict[keyname][key2])
            #     accum_dict[keyname][key2] = accum_dict[keyname][key2].value
        except Exception as e:
            logging.error(e)
        # accum_dict[keyname][cumulated[0]] = cumulated[1].value
        # accum_dict[keyname][less_15[0]] = less_15[1].value

    print(accum_dict)

    for key in accum_dict.keys():
        print(key, accum_dict[key])
        filteresDF.foreach(lambda row: switch_accu(
            key_dict=accum_dict[key], row=row, keyname=key))
        for key2 in accum_dict[key].keys():
            print(accum_dict[key], accum_dict[key][key2].value)
            accum_dict[key][key2] = accum_dict[key][key2].value
    #
    # Constants.db.update(
    #     {
    #         "israel": accum_dict
    #     }
    # )  # load to firebase

    Constants.db.update({
        "israel": {
            str(accum_dict["vaccinated"]): {
                "Cumulated_vaccinated_total":
                accum_dict["vaccinated"]["Cumulated_vaccinated_total"],
                "Cumulated_vaccinated_<15":
                accum_dict["vaccinated"]["Cumulated_vaccinated_<15"]
            },
            str(accum_dict["dead"]): {
                "Cumulated_deaths_total":
                accum_dict["dead"]["Cumulated_deaths_total"],
                "Cumulated_deaths_<15":
                accum_dict["dead"]["Cumulated_deaths_<15"]
            },
            str(accum_dict["diagnostic_tests"]): {
                "Cumulated_number_of_diagnostic_tests_total":
                accum_dict["diagnostic_tests"]
                ["Cumulated_number_of_diagnostic_tests_total"],
                "Cumulated_number_of_diagnostic_tests_<15":
                accum_dict["diagnostic_tests"]
                ["Cumulated_number_of_diagnostic_tests_<15"]
            },
            str(accum_dict["tests"]): {
                "Cumulated_number_of_tests_total":
                accum_dict["tests"]["Cumulated_number_of_tests_total"],
                "Cumulated_number_of_tests_<15":
                accum_dict["tests"]["Cumulated_number_of_tests_<15"]
            },
            str(accum_dict["recovered"]): {
                "Cumulated_recovered_total":
                accum_dict["recovered"]["Cumulated_recovered_total"],
                "Cumulated_recovered_<15":
                accum_dict["recovered"]["Cumulated_recovered_<15"]
            },
            str(accum_dict["Cumulative_verified_cases"]): {
                "Cumulative_verified_cases_total":
                accum_dict["Cumulative_verified_cases"]
                ["Cumulative_verified_cases_total"],
                "Cumulative_verified_cases_<15":
                accum_dict["Cumulative_verified_cases"]
                ["Cumulative_verified_cases_<15"]
            },
        }
    })  # load to firebase

    # from testsAndOthers.data_types_and_structures import DataTypesHandler, PrintForm
    # DataTypesHandler.print_data_recursively(data=json_count_names, print_dict=PrintForm.PRINT_DICT.value)

    logging.debug(f"spark total time: {time.time() - st} seconds")
def to_spark_direct(cities: dict):
    st = time.time()

    spark = SparkSession \
        .builder \
        .enableHiveSupport() \
        .getOrCreate()

    # cities: List[Dict[str, str, str, str, str, str, str, str, str, int]] = dict_root["result"]["records"]
    citiesDF: DataFrame = spark.createDataFrame(data=cities)
    citiesDF.printSchema()
    citiesDF.show(truncate=False)
    # from datetime import date
    # today = date.today()
    # print("Today's date:", today)
    # print(type(citiesDF.filter(citiesDF.Date >= date.today())))
    # filteresDF: DataFrame = citiesDF.filter(citiesDF.Date >= "2021-01-09")
    from datetime import datetime, timedelta
    result_length: int = 0
    day: datetime = datetime.now()  # today
    while result_length == 0:
        # print(type(day))
        day_str: str = datetime.strftime(day, '%Y-%m-%d')
        filteresDF: DataFrame = citiesDF.filter(citiesDF.Date >= day_str)
        filteresDF.show()
        filteresDF.describe().show()
        schema = filteresDF.columns
        logging.debug(schema)
        # print(type(filteresDF.collect()))
        final_result: List[Dict] = filteresDF.collect()
        # print(filteresDF.select("*"))
        # print(datetime.strftime(day, '%Y-%m-%d'), type(datetime.strftime(day, '%Y-%m-%d')))
        logging.debug(f'{day_str} {type(day_str)}')
        logging.debug("Empty RDD: %s" % (final_result.__len__() == 0))
        day = day - timedelta(1)  # timedelta() indicates how many days ago
        result_length = final_result.__len__()
    # print(type(filteresDF.rdd.mapValues(lambda city: (city["City_Name"], city["City_Code"]))))
    # print(filteresDF.rdd.mapValues(lambda city: (city["City_Name"], city["City_Code"])).collectAsMap())
    # print(filteresDF.select(explode(filteresDF.rdd.collectAsMap())))
    # print(explode(filteresDF))
    # print(filteresDF.toPandas().to_dict())
    # my_dict2 = {y: x for x, y in filteresDF.toPandas().to_dict().items()}
    # print(my_dict2)  # inverting fails at runtime: the dict's values are themselves dicts (unhashable)
    #  metric: Column = create_map(filteresDF.columns)
    #  metric: Column = create_map([
    #      filteresDF.City_Name,
    #      [
    #          filteresDF.City_Code,
    #          filteresDF.Cumulated_deaths,
    #          filteresDF.Cumulated_number_of_diagnostic_tests,
    #          filteresDF.Cumulated_number_of_tests,
    #          filteresDF.Cumulated_recovered,
    #          filteresDF.Cumulated_vaccinated,
    #          filteresDF["Cumulative_verified_cases"],
    #          filteresDF.Date,
    #          filteresDF["_id"]
    #      ]
    #  ])
    #  print(filteresDF.select(explode(metric)))
    #  filteresDF.select(explode(metric)).show()
    #  filteresDF.select(create_map(filteresDF.columns).alias("map")).show()

    Constants.db.update({
        "cities_3": {
            "schema": schema,
            "data": final_result,
            "filteresDF": filteresDF.toJSON().collect(),
            # "ok_3": json.loads(str(dict({"data": filteresDF.toJSON().collect()}))),
            # "ok_5": json.load(filteresDF.toJSON().collect()),
            # "ok": filteresDF.toJSON().keys(),
            # "ok_2": filteresDF.toJSON().collectAsMap(),
            "ok": filteresDF.toPandas().to_dict()
        }
    })  # load to firebase

    # from testsAndOthers.data_types_and_structures import DataTypesHandler, PrintForm
    # DataTypesHandler.print_data_recursively(data=json_count_names, print_dict=PrintForm.PRINT_DICT.value)

    logging.debug(f"spark total time: {time.time() - st} seconds")
def to_spark_direct_upside_down(cities: dict):
    st = time.time()

    spark = SparkSession \
        .builder \
        .enableHiveSupport() \
        .getOrCreate()

    # cities: List[Dict[str, str, str, str, str, str, str, str, str, int]] = dict_root["result"]["records"]
    citiesDF: DataFrame = spark.createDataFrame(data=cities)
    # citiesDF.printSchema()
    # citiesDF.show(truncate=False)

    from datetime import datetime, timedelta
    result_length: int = 0
    day: datetime = datetime.now()  # today
    while result_length == 0:
        day_str: str = datetime.strftime(day, '%Y-%m-%d')
        filteresDF: DataFrame = citiesDF.filter(citiesDF.Date >= day_str)

        schema = filteresDF.columns

        final_result: List[Dict] = filteresDF.collect()

        logging.debug(f'{day_str} {type(day_str)}')
        logging.debug("Empty RDD: %s" % (final_result.__len__() == 0))
        day = day - timedelta(1)  # timedelta() indicates how many days ago
        result_length = final_result.__len__()

    # filteresDF.show()
    # filteresDF.describe().show()
    # logging.debug(schema)

    def append_json(row: Row):
        return {row["City_Name"]: row.asDict()}  # {"counter": row.asDict()}

    # Note: this foreach discards append_json's return value; the mapping happens below.
    filteresDF.foreach(append_json)
    cities_final_df: DataFrame = spark.createDataFrame(
        data=filteresDF.rdd.map(append_json).collect())

    Constants.db.update({
        "cities_3": {
            "schema": schema,
            "data": final_result,
            "filteresDF": filteresDF.toJSON().collect(),
            "ok": filteresDF.toPandas().to_dict()
        }
    })  # load to firebase

    Constants.db.update({"cities_final": cities_final_df.toPandas().to_dict()
                         })  # load to firebase

    total: Accumulator = spark.sparkContext.accumulator(0)
    less: Accumulator = spark.sparkContext.accumulator(0)
    keys_lst: list = []
    updated_to: str = ''

    def add(row: Row, total: Accumulator, less: Accumulator):
        for val in row.asDict().values():
            if str(val).isdigit():
                total += int(val)
            elif val == "<15":
                less += 1

    for key in filteresDF.toPandas().keys():
        if key == 'Date':
            print(filteresDF.rdd.first().Date)
            updated_to = filteresDF.rdd.first().Date
            continue
        elif key in ["_id", "City_Name", "City_Code"]:
            continue
        print(key)
        filteresDF.select(
            filteresDF[key]).foreach(lambda row: add(row, total, less))
        print(total.value)
        print(less.value)
        keys_lst.append(
            {str(key): {
                 "total": total.value,
                 "less_<15": less.value
             }})
        total.value = 0
        less.value = 0

    keys_lst.append(updated_to)
    Constants.db.update({"israel_final_3": keys_lst})

    Constants.db.update({"israel_final_2": [keys_lst, updated_to]})

    Constants.db.update(
        {"israel_final": {
            "data": keys_lst,
            "Last_Update": updated_to
        }})

    logging.debug(f"spark total time: {time.time() - st} seconds")
Example #10
from pyspark.ml.feature import HashingTF, IDF, Tokenizer
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

sentenceData = spark.createDataFrame(
    [(0.0, "Hi I heard about Spark"),
     (0.0, "I wish Java could use case classes"),
     (1.0, "Logistic regression models are neat"), (1.0, "I love it"),
     (0.0, "I know it")], ["label", "sentence"])

tokenizer = Tokenizer(inputCol="sentence", outputCol="words")
wordsData = tokenizer.transform(sentenceData)

hashingTF = HashingTF(inputCol="words",
                      outputCol="rawFeatures",
                      numFeatures=10)
featurizedData = hashingTF.transform(wordsData)
# alternatively, CountVectorizer can also be used to get term frequency vectors

idf = IDF(inputCol="rawFeatures", outputCol="features")
idfModel = idf.fit(featurizedData)
rescaledData = idfModel.transform(featurizedData)

rescaledData.select("label", "features").show()
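
As the comment in the example notes, CountVectorizer can replace HashingTF when exact term counts and an inspectable vocabulary are wanted; a sketch on the same wordsData (vocabSize is illustrative):

from pyspark.ml.feature import CountVectorizer

cv = CountVectorizer(inputCol="words", outputCol="rawFeatures", vocabSize=20)
cvModel = cv.fit(wordsData)
print(cvModel.vocabulary)  # learned terms, index-aligned with the vector positions

counted = cvModel.transform(wordsData)
idfModelCV = IDF(inputCol="rawFeatures", outputCol="features").fit(counted)
idfModelCV.transform(counted).select("label", "features").show(truncate=False)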
def to_spark_direct_upside_down(cities: dict):
    st = time.time()

    spark = SparkSession \
        .builder \
        .enableHiveSupport() \
        .getOrCreate()

    # cities: List[Dict[str, str, str, str, str, str, str, str, str, int]] = dict_root["result"]["records"]
    citiesDF: DataFrame = spark.createDataFrame(data=cities)
    # citiesDF.printSchema()
    # citiesDF.show(truncate=False)

    from datetime import datetime, timedelta
    result_length: int = 0
    day: datetime = datetime.now()  # today
    while result_length == 0:
        day_str: str = datetime.strftime(day, '%Y-%m-%d')
        filteresDF: DataFrame = citiesDF.filter(citiesDF.Date >= day_str)
        # filteresDF.show()
        # filteresDF.describe().show()
        schema = filteresDF.columns
        # logging.debug(schema)

        final_result: List[Dict] = filteresDF.collect()

        logging.debug(f'{day_str} {type(day_str)}')
        logging.debug("Empty RDD: %s" % (final_result.__len__() == 0))
        day = day - timedelta(1)  # timedelta() indicates how many days ago
        result_length = final_result.__len__()

    def append_json(row: Row):
        return {row["City_Name"]: row.asDict()}  # {"counter": row.asDict()}

    # Note: this foreach discards append_json's return value; the mapping happens below.
    filteresDF.foreach(append_json)
    cities_final_df: DataFrame = spark.createDataFrame(
        data=filteresDF.rdd.map(append_json).collect())
    # print(cities_final_df.toPandas().to_dict())

    Constants.db.update(
        {
            "cities_3": {
                "schema": schema,
                "data": final_result,
                "filteresDF": filteresDF.toJSON().collect(),
                "ok": filteresDF.toPandas().to_dict(),
                "shit": cities_final_df.toPandas().to_dict()
            }
        }
    )  # load to firebase

    Constants.db.update(
        {"cities_final": cities_final_df.toPandas().to_dict()}
    )  # load to firebase
    # ---------------------------------------------------------------------------------
    # cities_final_df.summary()

    # print(filteresDF.toPandas().to_dict()["Cumulated_vaccinated"])
    # print(filteresDF.Cumulated_vaccinated)
    # vaccinated: Column = filteresDF.Cumulated_vaccinated
    # vaccinated_df: DataFrame = spark.createDataFrame(data=vaccinated)
    # vaccinated_df.show()

    # filteresDF.select(filteresDF.Cumulated_vaccinated).show()

    total: Accumulator = spark.sparkContext.accumulator(0)
    less: Accumulator = spark.sparkContext.accumulator(0)
    keys_lst: list = []
    updated_to: str = ''

    def add(row: Row, total: Accumulator, less: Accumulator):
        # print(row.asDict().values())
        for val in row.asDict().values():
            if str(val).isdigit():
                total += int(val)
            elif val == "<15":
                less += 1

    for key in filteresDF.toPandas().keys():
        if key == 'Date':
            # print(filteresDF[key].getItem(0).astype("string"))
            print(filteresDF.rdd.first().Date)
            updated_to = filteresDF.rdd.first().Date
            continue
        elif key in ["_id", "City_Name", "City_Code"]: continue
        print(key)
        filteresDF.select(filteresDF[key]).foreach(lambda row: add(row, total, less))
        print(total.value)
        print(less.value)
        keys_lst.append(
            {
                str(key): {
                    "total": total.value,
                    "less_<15": less.value
                }
            }
        )
        total.value = 0
        less.value = 0

    keys_lst.append(updated_to)
    Constants.db.update(
        {
            "israel_final_3": keys_lst
        }
    )

    Constants.db.update(
        {
            "israel_final_2": [
                keys_lst, updated_to
            ]
        }
    )

    Constants.db.update(
        {
            "israel_final": {
                "data": keys_lst,
                "Last_Update": updated_to
            }
        }
    )

    # accum_dict: Dict[Dict[str, Accumulator]] = {
    #     "vaccinated": {
    #         "Cumulated_vaccinated_total": spark.sparkContext.accumulator(0),
    #         "Cumulated_vaccinated_<15": spark.sparkContext.accumulator(0)
    #     },
    #     "dead": {
    #         "Cumulated_deaths_total": spark.sparkContext.accumulator(0),
    #         "Cumulated_deaths_<15": spark.sparkContext.accumulator(0)
    #     },
    #     "diagnostic_tests": {
    #         "Cumulated_number_of_diagnostic_tests_total": spark.sparkContext.accumulator(0),
    #         "Cumulated_number_of_diagnostic_tests_<15": spark.sparkContext.accumulator(0)
    #     },
    #     "tests": {
    #         "Cumulated_number_of_tests_total": spark.sparkContext.accumulator(0),
    #         "Cumulated_number_of_tests_<15": spark.sparkContext.accumulator(0)
    #     },
    #     "recovered": {
    #         "Cumulated_recovered_total": spark.sparkContext.accumulator(0),
    #         "Cumulated_recovered_<15": spark.sparkContext.accumulator(0)
    #     },
    #     "Cumulative_verified_cases": {
    #         "Cumulative_verified_cases_total": spark.sparkContext.accumulator(0),
    #         "Cumulative_verified_cases_<15": spark.sparkContext.accumulator(0)
    #     }
    # }
    #
    # def count_israel_total(row: Row, acc_vaccinated_internal: Accumulator,
    #                        acc_vaccinated_less_than_15_internal: Accumulator):
    #     # print(row)
    #     for city in row.asDict().values():
    #         print(city)
    #     if row.Cumulated_vaccinated.isdigit():
    #         acc_vaccinated_internal += int(row.Cumulated_vaccinated)
    #         # print(f"------------------{int(row.Cumulated_vaccinated)}-----------------------")
    #     else:
    #         print(row.Cumulated_vaccinated, type(row.Cumulated_vaccinated))
    #         print("------------------<15-----------------------")
    #         acc_vaccinated_less_than_15_internal += 1
    #
    # def switch_accu(key_dict: dict, row: Row, keyname):
    #     # print(key_dict, keyname)
    #     try:
    #         less_15 = key_dict.popitem()
    #         cumulated = key_dict.popitem()
    #         count_israel_total(row, cumulated[1], less_15[1])
    #
    #     except Exception as e:
    #         # logging.error(e)
    #         pass
    #     finally:
    #         # print()
    #         pass
    #
    # # print(accum_dict)
    #
    # for key in accum_dict.keys():
    #     # print(key, accum_dict[key])
    #     cities_final_df.foreach(lambda row: switch_accu(key_dict=accum_dict[key], row=row, keyname=key))
    #
    #     for key2 in accum_dict[key].keys():
    #         # print(accum_dict[key], accum_dict[key][key2].value)
    #         accum_dict[key][key2] = accum_dict[key][key2].value
    #
    # Constants.db.update(
    #     {
    #         "israel": {
    #             str(accum_dict["vaccinated"]): {
    #                 "Cumulated_vaccinated_total": accum_dict["vaccinated"]["Cumulated_vaccinated_total"],
    #                 "Cumulated_vaccinated_<15": accum_dict["vaccinated"]["Cumulated_vaccinated_<15"]
    #             },
    #             str(accum_dict["dead"]): {
    #                 "Cumulated_deaths_total": accum_dict["dead"]["Cumulated_deaths_total"],
    #                 "Cumulated_deaths_<15": accum_dict["dead"]["Cumulated_deaths_<15"]
    #             },
    #             str(accum_dict["diagnostic_tests"]): {
    #                 "Cumulated_number_of_diagnostic_tests_total":
    #                     accum_dict["diagnostic_tests"]["Cumulated_number_of_diagnostic_tests_total"],
    #                 "Cumulated_number_of_diagnostic_tests_<15":
    #                     accum_dict["diagnostic_tests"]["Cumulated_number_of_diagnostic_tests_<15"]
    #             },
    #             str(accum_dict["tests"]): {
    #                 "Cumulated_number_of_tests_total": accum_dict["tests"]["Cumulated_number_of_tests_total"],
    #                 "Cumulated_number_of_tests_<15": accum_dict["tests"]["Cumulated_number_of_tests_<15"]
    #             },
    #             str(accum_dict["recovered"]): {
    #                 "Cumulated_recovered_total": accum_dict["recovered"]["Cumulated_recovered_total"],
    #                 "Cumulated_recovered_<15": accum_dict["recovered"]["Cumulated_recovered_<15"]
    #             },
    #             str(accum_dict["Cumulative_verified_cases"]): {
    #                 "Cumulative_verified_cases_total":
    #                     accum_dict["Cumulative_verified_cases"]["Cumulative_verified_cases_total"],
    #                 "Cumulative_verified_cases_<15":
    #                     accum_dict["Cumulative_verified_cases"]["Cumulative_verified_cases_<15"]
    #             },
    #         }
    #     }
    # )  # load to firebase

    # from testsAndOthers.data_types_and_structures import DataTypesHandler, PrintForm
    # DataTypesHandler.print_data_recursively(data=json_count_names, print_dict=PrintForm.PRINT_DICT.value)

    logging.debug(f"spark total time: {time.time() - st} seconds")