Workflow saved

unknown committed 2025-08-29 09:09:09 +00:00
parent ab9c61fddd
commit b255836d13
3 changed files with 78 additions and 4 deletions


@@ -77,7 +77,7 @@ failed_payments_reader_df.createOrReplaceTempView('failed_payments_reader_df')
# %%
print(failed_payments_reader_df.columns)
-failed_payments_filter_df = spark.sql("select * from failed_payments_reader_df where retry_attempt_count < 3 AND gateway = 'CCS' AND (retry_status = 'new' OR retry_status = 'failed') and payment_id = 'pi_3RvDQqP0JqbrujP90uuhk13h'")
+failed_payments_filter_df = spark.sql("select * from failed_payments_reader_df where retry_attempt_count < 3 AND gateway = 'CCS' AND (retry_status = 'new' OR retry_status = 'failed')")
failed_payments_filter_df.createOrReplaceTempView('failed_payments_filter_df')
# %%
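
The change in this hunk drops a hardcoded payment_id that had pinned the filter to a single test payment, restoring the filter to every CCS payment with fewer than three retry attempts in the 'new' or 'failed' state. For reference, the same predicate could be expressed through the DataFrame API instead of a temp-view SQL string (a sketch only; the column names are taken from the query above):

from pyspark.sql import functions as F

# Equivalent to the SQL filter: fewer than 3 attempts, CCS gateway, retriable status
failed_payments_filter_df = failed_payments_reader_df.filter(
    (F.col('retry_attempt_count') < 3)
    & (F.col('gateway') == 'CCS')
    & F.col('retry_status').isin('new', 'failed')
)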
@@ -232,3 +232,37 @@ _success_payments_mapper_select_clause.append("CAST(amount AS DECIMAL(10,2)) AS
success_payments_mapper_df=spark.sql(("SELECT " + ', '.join(_success_payments_mapper_select_clause) + " FROM success_payment_filter_df").replace("{job_id}",f"'{job_id}'"))
success_payments_mapper_df.createOrReplaceTempView("success_payments_mapper_df")
# %%
+# Build the MERGE upsert for failed payments: match on payment_id, update the other columns.
+_failed_payments_update_writer_fields_to_update = failed_payments_update_mapper_df.columns
+_failed_payments_update_writer_set_clause = []
+_failed_payments_update_writer_unique_key_clause = []
+for _key in ['payment_id']:
+    _failed_payments_update_writer_unique_key_clause.append(f't.{_key} = s.{_key}')
+for _field in _failed_payments_update_writer_fields_to_update:
+    # Skip the merge key: it is matched in the ON clause, not updated.
+    if _field not in ['payment_id']:
+        _failed_payments_update_writer_set_clause.append(f't.{_field} = s.{_field}')
+_merge_query = '''
+MERGE INTO dremio.failedpayments t
+USING failed_payments_update_mapper_df s
+ON ''' + ' AND '.join(_failed_payments_update_writer_unique_key_clause) + ''' WHEN MATCHED THEN
+UPDATE SET ''' + ', '.join(_failed_payments_update_writer_set_clause) + ' WHEN NOT MATCHED THEN INSERT *'
+spark.sql(_merge_query)
+# %%
+success_payments_mapper_df.write.mode('append').saveAsTable('dremio.payments')
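
The writer above assembles the MERGE statement from whatever columns the mapper produced, keyed on payment_id. Assuming, purely for illustration, that the mapper emits retry_status and retry_attempt_count alongside the key, printing _merge_query would yield roughly:

MERGE INTO dremio.failedpayments t
USING failed_payments_update_mapper_df s
ON t.payment_id = s.payment_id WHEN MATCHED THEN
UPDATE SET t.retry_status = s.retry_status, t.retry_attempt_count = s.retry_attempt_count WHEN NOT MATCHED THEN INSERT *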


@@ -87,7 +87,7 @@ def failed_payments_reader(spark):
def failed_payments_filter(failed_payments_reader_df, spark):
    print(failed_payments_reader_df.columns)
-    failed_payments_filter_df = spark.sql("select * from failed_payments_reader_df where retry_attempt_count < 3 AND gateway = 'CCS' AND (retry_status = 'new' OR retry_status = 'failed') and payment_id = 'pi_3RvDQqP0JqbrujP90uuhk13h'")
+    failed_payments_filter_df = spark.sql("select * from failed_payments_reader_df where retry_attempt_count < 3 AND gateway = 'CCS' AND (retry_status = 'new' OR retry_status = 'failed')")
    failed_payments_filter_df.createOrReplaceTempView('failed_payments_filter_df')
    return (failed_payments_filter_df,)
@@ -221,7 +221,7 @@ def failed_payments_update_mapper(job_id, payment_api_df, spark):
    failed_payments_update_mapper_df=spark.sql(("SELECT " + ', '.join(_failed_payments_update_mapper_select_clause) + " FROM payment_api_df").replace("{job_id}",f"'{job_id}'"))
    failed_payments_update_mapper_df.createOrReplaceTempView("failed_payments_update_mapper_df")
-    return
+    return (failed_payments_update_mapper_df,)
@app.cell
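
The one-line change above matters in marimo's file format: a cell exposes a name to downstream cells only by returning it, so the bare return had left failed_payments_update_mapper_df invisible to the new writer cell added below. A minimal standalone sketch of the convention (hypothetical names, not part of this workflow):

import marimo

app = marimo.App()

@app.cell
def producer():
    shared_value = 'visible to later cells'
    return (shared_value,)

@app.cell
def consumer(shared_value):
    print(shared_value)
    return

if __name__ == '__main__':
    app.run()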
@@ -261,6 +261,46 @@ def success_payments_mapper(job_id, spark, success_payment_filter_df):
    success_payments_mapper_df=spark.sql(("SELECT " + ', '.join(_success_payments_mapper_select_clause) + " FROM success_payment_filter_df").replace("{job_id}",f"'{job_id}'"))
    success_payments_mapper_df.createOrReplaceTempView("success_payments_mapper_df")
    return (success_payments_mapper_df,)
+@app.cell
+def failed_payments_update_writer(failed_payments_update_mapper_df, spark):
+    _failed_payments_update_writer_fields_to_update = failed_payments_update_mapper_df.columns
+    _failed_payments_update_writer_set_clause = []
+    _failed_payments_update_writer_unique_key_clause = []
+    for _key in ['payment_id']:
+        _failed_payments_update_writer_unique_key_clause.append(f't.{_key} = s.{_key}')
+    for _field in _failed_payments_update_writer_fields_to_update:
+        # Skip the merge key: it is matched in the ON clause, not updated.
+        if _field not in ['payment_id']:
+            _failed_payments_update_writer_set_clause.append(f't.{_field} = s.{_field}')
+    _merge_query = '''
+MERGE INTO dremio.failedpayments t
+USING failed_payments_update_mapper_df s
+ON ''' + ' AND '.join(_failed_payments_update_writer_unique_key_clause) + ''' WHEN MATCHED THEN
+UPDATE SET ''' + ', '.join(_failed_payments_update_writer_set_clause) + ' WHEN NOT MATCHED THEN INSERT *'
+    spark.sql(_merge_query)
+    return
+@app.cell
+def success_payments_writer(success_payments_mapper_df):
+    success_payments_mapper_df.write.mode('append').saveAsTable('dremio.payments')
+    return
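
Note the asymmetry between the two writers: failed payments are upserted into dremio.failedpayments through the MERGE keyed on payment_id, while successful payments are simply appended to dremio.payments, since each success is a new row. A quick post-run sanity check could read the tables back (a sketch; table and column names as used in the cells above):

print(spark.table('dremio.payments').count())
print(spark.table('dremio.failedpayments').filter("retry_status = 'failed'").count())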

File diff suppressed because one or more lines are too long