Workflow saved
@@ -266,9 +266,9 @@ for _gs in _grouping_specs:
    _partials.append(_gdf)


-high_valued_payments_df = reduce(lambda a, b: a.unionByName(b), _partials)
+high_valued_payments___df = reduce(lambda a, b: a.unionByName(b), _partials)

-high_valued_payments_df.createOrReplaceTempView('high_valued_payments_df')
+high_valued_payments___df.createOrReplaceTempView('high_valued_payments___df')


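For readers outside this repo, a minimal, self-contained sketch of the pattern this hunk renames: one aggregated partial per grouping spec, folded together with unionByName and registered as a temp view under the new name. The sample rows, grouping specs, and the 1000 threshold are placeholders for illustration, not values from this workflow.

from functools import reduce

from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.appName("high_valued_payments_sketch").getOrCreate()

# Placeholder input; the real workflow reads an upstream filtered DataFrame.
payments_df = spark.createDataFrame(
    [("2024-01-01", "card", 1200.0),
     ("2024-01-01", "wire", 90.0),
     ("2024-01-02", "card", 3400.0)],
    ["payment_date", "payment_method", "amount"],
)

# One aggregated partial per grouping spec; all partials share one schema so
# unionByName can line them up by column name.
_grouping_specs = [["payment_date"], ["payment_date", "payment_method"]]
_partials = []
for _gs in _grouping_specs:
    _gdf = (
        payments_df
        .filter(F.col("amount") > 1000)  # assumed "high valued" cut-off
        .groupBy(*_gs)
        .agg(F.sum("amount").alias("high_value_total"))
        .withColumn("grouping", F.lit(",".join(_gs)))
        .select("payment_date", "grouping", "high_value_total")
    )
    _partials.append(_gdf)

# Fold the partials into a single DataFrame under the renamed variable.
high_valued_payments___df = reduce(lambda a, b: a.unionByName(b), _partials)

# The temp view name, not the Python variable, is what later spark.sql()
# queries resolve, so the commit renames both together.
high_valued_payments___df.createOrReplaceTempView('high_valued_payments___df')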
@@ -431,7 +431,7 @@ spark.sql(_merge_query)

print(total_payments_and_total_value_processed_df.columns)
print(most_used_payment_method___df.columns)
-print(high_valued_payments_df.columns)
+print(high_valued_payments___df.columns)
print(total_failed_payments___df.columns)

success_payment_metrics_df = spark.sql("""
@@ -447,7 +447,7 @@ FULL OUTER JOIN total_payments_and_total_value_processed_df a
ON a.payment_date = d.payment_date
LEFT JOIN most_used_payment_method___df b
ON a.payment_date = b.payment_date
-LEFT JOIN high_valued_payments_df c
+LEFT JOIN high_valued_payments___df c
ON a.payment_date = c.payment_date
""")

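The SQL half of the rename matters because spark.sql() resolves high_valued_payments___df against the registered temp views, not against any Python variable; if only the view registration were renamed, the old name in the JOIN would fail to resolve. A small self-contained sketch with made-up rows and a trimmed-down version of the join:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("payment_metrics_join_sketch").getOrCreate()

# Hypothetical daily totals standing in for the upstream aggregates.
spark.createDataFrame(
    [("2024-01-01", 3, 4690.0)],
    ["payment_date", "total_payments", "total_value"],
).createOrReplaceTempView("total_payments_and_total_value_processed_df")

spark.createDataFrame(
    [("2024-01-01", 4600.0)],
    ["payment_date", "high_value_total"],
).createOrReplaceTempView("high_valued_payments___df")

# The join references the *view* names registered above.
success_payment_metrics_df = spark.sql("""
SELECT a.payment_date,
       a.total_payments,
       a.total_value,
       c.high_value_total
FROM total_payments_and_total_value_processed_df a
LEFT JOIN high_valued_payments___df c
    ON a.payment_date = c.payment_date
""")
success_payment_metrics_df.show()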
@@ -274,7 +274,7 @@ def most_used_payment_method__(filter__6_df, job_id, spark):


@app.cell
-def high_valued_payments(
+def high_valued_payments__(
    expr,
    high_valued_payments_filter_df,
    lit,
@@ -318,14 +318,14 @@ def high_valued_payments(
        _partials.append(_gdf)


-    high_valued_payments_df = reduce(lambda a, b: a.unionByName(b), _partials)
+    high_valued_payments___df = reduce(lambda a, b: a.unionByName(b), _partials)

-    high_valued_payments_df.createOrReplaceTempView('high_valued_payments_df')
+    high_valued_payments___df.createOrReplaceTempView('high_valued_payments___df')




-    return (high_valued_payments_df,)
+    return (high_valued_payments___df,)


@app.cell
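In the marimo notebook the rename also has to propagate through the cell graph: marimo wires cells by the variables a cell returns and the parameters downstream cells declare, so the returned high_valued_payments___df, its temp view, and the parameter of the consuming cell all move to the new name together, while the cell function name itself is only a label. A stripped-down, hypothetical illustration of that wiring (the cell bodies are placeholders, not the workflow's logic):

import marimo

app = marimo.App()


@app.cell
def setup():
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.appName("marimo_wiring_sketch").getOrCreate()
    # Placeholder for the upstream filter cell.
    high_valued_payments_filter_df = spark.createDataFrame(
        [("2024-01-01", 1200.0)], ["payment_date", "amount"]
    )
    return spark, high_valued_payments_filter_df


@app.cell
def high_valued_payments__(high_valued_payments_filter_df):
    # Stand-in for the aggregation shown in the hunks above.
    high_valued_payments___df = high_valued_payments_filter_df
    high_valued_payments___df.createOrReplaceTempView('high_valued_payments___df')
    return (high_valued_payments___df,)


@app.cell
def success_payment_metrics(high_valued_payments___df, spark):
    print(high_valued_payments___df.columns)
    success_payment_metrics_df = spark.sql(
        "SELECT * FROM high_valued_payments___df"
    )
    return (success_payment_metrics_df,)


if __name__ == "__main__":
    app.run()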
@@ -516,7 +516,7 @@ def data_writer__15(failed_payment_metrics_df, spark):

@app.cell
def success_payment_metrics(
-    high_valued_payments_df,
+    high_valued_payments___df,
    most_used_payment_method___df,
    spark,
    total_failed_payments___df,
@@ -525,7 +525,7 @@ def success_payment_metrics(

    print(total_payments_and_total_value_processed_df.columns)
    print(most_used_payment_method___df.columns)
-    print(high_valued_payments_df.columns)
+    print(high_valued_payments___df.columns)
    print(total_failed_payments___df.columns)

    success_payment_metrics_df = spark.sql("""
@@ -541,7 +541,7 @@ def success_payment_metrics(
ON a.payment_date = d.payment_date
LEFT JOIN most_used_payment_method___df b
ON a.payment_date = b.payment_date
-LEFT JOIN high_valued_payments_df c
+LEFT JOIN high_valued_payments___df c
ON a.payment_date = c.payment_date
""")

File diff suppressed because one or more lines are too long