Workflow saved
@@ -299,6 +299,13 @@ failed_payments_mapper_df.createOrReplaceTempView("failed_payments_mapper_df")
 
 # %%
 
+print(failed_payments_mapper_df.columns)
+final_failed_payments_df = spark.sql("select * from failed_payments_mapper_df where payment_date >= COALESCE((SELECT MAX(DATE(payment_date)) FROM dremio.failedpaymentmetrics), (SELECT MIN(payment_date) FROM failed_payments_mapper_df))")
+final_failed_payments_df.createOrReplaceTempView('final_failed_payments_df')
+final_failed_payments_df.persist()
+
+# %%
+
 print(final_failed_payments_df.columns)
 filter__13_df = spark.sql("select * from final_failed_payments_df where gateway = \'CCS\'")
 filter__13_df.createOrReplaceTempView('filter__13_df')
@@ -472,10 +479,3 @@ _merge_query = '''
 
 spark.sql(_merge_query)
 
-# %%
-
-print(FailedPaymentsData_df.columns)
-LatestFailedPayments_df = spark.sql("select * from FailedPaymentsData_df where payment_date >= COALESCE((SELECT MAX(DATE(payment_date)) FROM dremio.failedpaymentmetrics), (SELECT MIN(payment_date) FROM failed_payments_mapper_df))")
-
-final_failed_payments_df.createOrReplaceTempView('final_failed_payments_df')
-final_failed_payments_df.persist()
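The COALESCE subquery in the final_failed_payments query is a high-water-mark incremental filter: it keeps only rows at or beyond the newest payment_date already written to dremio.failedpaymentmetrics, and falls back to the earliest staged date when the target table is still empty. A minimal sketch of the same pattern, assuming an active SparkSession named spark and a registered temp view incoming_df (hypothetical name) with a payment_date column:

# High-water-mark incremental filter, as in the query above.
# `incoming_df` is a hypothetical staged temp view with a payment_date column.
watermark_sql = """
SELECT *
FROM incoming_df
WHERE payment_date >= COALESCE(
    -- newest date already loaded into the target table
    (SELECT MAX(DATE(payment_date)) FROM dremio.failedpaymentmetrics),
    -- first run: target table is empty, so keep everything staged
    (SELECT MIN(payment_date) FROM incoming_df)
)
"""
incremental_df = spark.sql(watermark_sql)

Because the comparison is >=, rows on the boundary date are read again on every run; that is safe as long as the downstream write deduplicates, which the _merge_query upsert in the same file appears to do.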
@@ -354,7 +354,7 @@ def failed_payments_mapper(failed_payments_reader_df, job_id, spark):
 
     failed_payments_mapper_df=spark.sql(("SELECT " + ', '.join(_failed_payments_mapper_select_clause) + " FROM failed_payments_reader_df").replace("{job_id}",f"'{job_id}'"))
     failed_payments_mapper_df.createOrReplaceTempView("failed_payments_mapper_df")
-    return
+    return (failed_payments_mapper_df,)
 
 
 @app.cell
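marimo cells communicate only through their signatures: a cell function receives the names it reads as parameters and must return a tuple of the names it defines, so a bare return leaves failed_payments_mapper_df invisible to every other cell. A minimal, self-contained sketch of the convention (toy cell and variable names, not from this repo):

import marimo

app = marimo.App()

@app.cell
def producer():
    # Only names returned here can be consumed by other cells.
    greeting = "hello"
    return (greeting,)

@app.cell
def consumer(greeting):
    # marimo injects `greeting` because producer returned it.
    print(greeting)
    return

if __name__ == "__main__":
    app.run()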
@@ -570,17 +570,13 @@ def success_payment_metrics_writer(spark, success_payment_metrics_df):
 
 
 @app.cell
-def final_failed_payments(
-    FailedPaymentsData_df,
-    final_failed_payments_df,
-    spark,
-):
+def final_failed_payments(failed_payments_mapper_df, spark):
 
-    print(FailedPaymentsData_df.columns)
-    LatestFailedPayments_df = spark.sql("select * from FailedPaymentsData_df where payment_date >= COALESCE((SELECT MAX(DATE(payment_date)) FROM dremio.failedpaymentmetrics), (SELECT MIN(payment_date) FROM failed_payments_mapper_df))")
-    final_failed_payments_df.createOrReplaceTempView('final_failed_payments_df')
-    final_failed_payments_df.persist()
-    return
+    print(failed_payments_mapper_df.columns)
+    final_failed_payments_df = spark.sql("select * from failed_payments_mapper_df where payment_date >= COALESCE((SELECT MAX(DATE(payment_date)) FROM dremio.failedpaymentmetrics), (SELECT MIN(payment_date) FROM failed_payments_mapper_df))")
+    final_failed_payments_df.createOrReplaceTempView('final_failed_payments_df')
+    final_failed_payments_df.persist()
+    return (final_failed_payments_df,)
 
 
 if __name__ == "__main__":
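Pairing createOrReplaceTempView with persist(), as this cell does, is a standard PySpark idiom: the view gives later spark.sql(...) calls a name to query, while persist() caches the rows so those queries reuse the computed result instead of re-running the watermark scan. Note that persist() is lazy; the cache is only populated by the first action. A minimal sketch, with hypothetical names:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("persist-sketch").getOrCreate()

# Hypothetical stand-in for final_failed_payments_df.
df = spark.range(10).withColumnRenamed("id", "payment_id")

df.createOrReplaceTempView("final_failed_payments_df")  # name usable from SQL
df.persist()  # lazy: cached by the first action that touches df

# Both queries read the cached rows rather than recomputing df.
low = spark.sql("SELECT * FROM final_failed_payments_df WHERE payment_id < 5")
high = spark.sql("SELECT * FROM final_failed_payments_df WHERE payment_id >= 5")
low.show()
high.show()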
File diff suppressed because one or more lines are too long