From 6eb50726035867c7ea8c5c9c1272158602998b96 Mon Sep 17 00:00:00 2001 From: supriya Date: Mon, 21 May 2018 15:48:28 +0530 Subject: [PATCH 1/4] - Added cluster restart test. - Added redundancy to column tables. --- .../createAndLoadCDCMixedTables.sql | 356 +-- .../cdcConnector/createCDCRowTables.sql | 2446 +++++++++++++++++ .../cdcConnector/scriptsForApp2/tableList.txt | 4 +- .../hydra/cdcConnector/SnappyCDCTest.java | 197 +- .../hydra/cdcConnector/snappyCustomizedHA.inc | 22 +- 5 files changed, 2800 insertions(+), 225 deletions(-) create mode 100644 dtests/src/resources/scripts/cdcConnector/createCDCRowTables.sql diff --git a/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCMixedTables.sql b/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCMixedTables.sql index ea1836bc7f..709d3d07a1 100644 --- a/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCMixedTables.sql +++ b/dtests/src/resources/scripts/cdcConnector/createAndLoadCDCMixedTables.sql @@ -1,224 +1,224 @@ DROP TABLE IF EXISTS PERSON_EVENT; DROP TABLE IF EXISTS staging_PERSON_EVENT; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_EVENT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/PERSON_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS PERSON_EVENT USING column OPTIONS(partition_by 'PRSN_EVNT_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_EVNT_ID ' ) AS (SELECT * FROM staging_PERSON_EVENT); +CREATE TABLE IF NOT EXISTS PERSON_EVENT USING column OPTIONS(partition_by 'PRSN_EVNT_ID', redundancy '1',buckets '32',key_columns 'CLIENT_ID,PRSN_EVNT_ID ' ) AS (SELECT * FROM staging_PERSON_EVENT); DROP TABLE IF EXISTS PERSON_EVENT_ATTRIBUTE; DROP TABLE IF EXISTS staging_PERSON_EVENT_ATTRIBUTE; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_EVENT_ATTRIBUTE +CREATE EXTERNAL TABLE IF NOT EXISTS 
staging_PERSON_EVENT_ATTRIBUTE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/PERSON_EVENT_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS PERSON_EVENT_ATTRIBUTE USING column OPTIONS(partition_by 'PRSN_EVNT_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_EVNT_ID,PRSN_EVNT_ATTR_ID ' ) AS (SELECT * FROM staging_PERSON_EVENT_ATTRIBUTE); +CREATE TABLE IF NOT EXISTS PERSON_EVENT_ATTRIBUTE USING column OPTIONS(partition_by 'PRSN_EVNT_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,PRSN_EVNT_ID,PRSN_EVNT_ATTR_ID ' ) AS (SELECT * FROM staging_PERSON_EVENT_ATTRIBUTE); DROP TABLE IF EXISTS CLAIM_STATUS; DROP TABLE IF EXISTS staging_CLAIM_STATUS; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_STATUS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_STATUS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CLAIM_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CLAIM_STATUS USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_STAT_ID ' ) AS (SELECT * FROM staging_CLAIM_STATUS); +CREATE TABLE IF NOT EXISTS CLAIM_STATUS USING column OPTIONS(partition_by 'PRSN_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_STAT_ID ' ) AS (SELECT * FROM staging_CLAIM_STATUS); DROP TABLE IF EXISTS CLAIM_ADDITIONAL_DIAGNOSIS; DROP TABLE IF EXISTS staging_CLAIM_ADDITIONAL_DIAGNOSIS; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_ADDITIONAL_DIAGNOSIS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_ADDITIONAL_DIAGNOSIS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CLAIM_ADDITIONAL_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS 
CLAIM_ADDITIONAL_DIAGNOSIS USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_ADD_DIAG_ID ' ) AS (SELECT * FROM staging_CLAIM_ADDITIONAL_DIAGNOSIS); +CREATE TABLE IF NOT EXISTS CLAIM_ADDITIONAL_DIAGNOSIS USING column OPTIONS(partition_by 'PRSN_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_ADD_DIAG_ID ' ) AS (SELECT * FROM staging_CLAIM_ADDITIONAL_DIAGNOSIS); DROP TABLE IF EXISTS CLAIM_DETAIL; DROP TABLE IF EXISTS staging_CLAIM_DETAIL; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_DETAIL +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_DETAIL USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CLAIM_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CLAIM_DETAIL USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_DTL_ID ' ) AS (SELECT * FROM staging_CLAIM_DETAIL); +CREATE TABLE IF NOT EXISTS CLAIM_DETAIL USING column OPTIONS(partition_by 'PRSN_ID', redundancy '1',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_DTL_ID ' ) AS (SELECT * FROM staging_CLAIM_DETAIL); DROP TABLE IF EXISTS CLAIM_PAYMENT_DETAIL; DROP TABLE IF EXISTS staging_CLAIM_PAYMENT_DETAIL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_PAYMENT_DETAIL +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_PAYMENT_DETAIL USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CLAIM_PAYMENT_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CLAIM_PAYMENT_DETAIL USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_PAY_ID,CLM_PAY_DTL_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT_DETAIL); +CREATE TABLE IF NOT EXISTS 
CLAIM_PAYMENT_DETAIL USING column OPTIONS(partition_by 'PRSN_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_PAY_ID,CLM_PAY_DTL_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT_DETAIL); DROP TABLE IF EXISTS CLAIM_ATTRIBUTE; DROP TABLE IF EXISTS staging_CLAIM_ATTRIBUTE; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_ATTRIBUTE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_ATTRIBUTE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CLAIM_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CLAIM_ATTRIBUTE USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_ATTR_ID ' ) AS (SELECT * FROM staging_CLAIM_ATTRIBUTE); +CREATE TABLE IF NOT EXISTS CLAIM_ATTRIBUTE USING column OPTIONS(partition_by 'PRSN_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_ATTR_ID ' ) AS (SELECT * FROM staging_CLAIM_ATTRIBUTE); DROP TABLE IF EXISTS CLAIM; DROP TABLE IF EXISTS staging_CLAIM; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CLAIM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CLAIM USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID ' ) AS (SELECT * FROM staging_CLAIM); +CREATE TABLE IF NOT EXISTS CLAIM USING column OPTIONS(partition_by 'PRSN_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID ' ) AS (SELECT * FROM staging_CLAIM); DROP TABLE IF EXISTS PERSON_CONTACT; DROP TABLE IF EXISTS staging_PERSON_CONTACT; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_CONTACT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_CONTACT USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/PERSON_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS PERSON_CONTACT USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CNTC_ID,PRSN_CNTC_ID ' ) AS (SELECT * FROM staging_PERSON_CONTACT); +CREATE TABLE IF NOT EXISTS PERSON_CONTACT USING column OPTIONS(partition_by 'PRSN_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CNTC_ID,PRSN_CNTC_ID ' ) AS (SELECT * FROM staging_PERSON_CONTACT); DROP TABLE IF EXISTS ORGANIZATION_CODE; DROP TABLE IF EXISTS staging_ORGANIZATION_CODE; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_ORGANIZATION_CODE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATION_CODE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/ORGANIZATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS ORGANIZATION_CODE USING column OPTIONS(partition_by 'ORG_ID', buckets '32',key_columns 'CLIENT_ID,ORG_ID,CD_VAL_ID,ORG_CD_ID ' ) AS (SELECT * FROM staging_ORGANIZATION_CODE); +CREATE TABLE IF NOT EXISTS ORGANIZATION_CODE USING column OPTIONS(partition_by 'ORG_ID', redundancy '1',buckets '32',key_columns 'CLIENT_ID,ORG_ID,CD_VAL_ID,ORG_CD_ID ' ) AS (SELECT * FROM staging_ORGANIZATION_CODE); DROP TABLE IF EXISTS COMPLAINT_STATUS; DROP TABLE IF EXISTS staging_COMPLAINT_STATUS; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_COMPLAINT_STATUS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_COMPLAINT_STATUS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/COMPLAINT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS COMPLAINT_STATUS USING column OPTIONS(partition_by 'INQ_ID',buckets '32',key_columns 'CLIENT_ID,INQ_ID,COMPLAINT_ID,COMPLAINT_STAT_ID ' ) AS (SELECT * FROM 
staging_COMPLAINT_STATUS); +CREATE TABLE IF NOT EXISTS COMPLAINT_STATUS USING column OPTIONS(partition_by 'INQ_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,INQ_ID,COMPLAINT_ID,COMPLAINT_STAT_ID ' ) AS (SELECT * FROM staging_COMPLAINT_STATUS); DROP TABLE IF EXISTS CONTACT; DROP TABLE IF EXISTS staging_CONTACT; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CONTACT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CONTACT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CONTACT USING column OPTIONS(partition_by 'CNTC_ID', buckets '32',key_columns 'CLIENT_ID,CNTC_ID' ) AS (SELECT * FROM staging_CONTACT); +CREATE TABLE IF NOT EXISTS CONTACT USING column OPTIONS(partition_by 'CNTC_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,CNTC_ID' ) AS (SELECT * FROM staging_CONTACT); DROP TABLE IF EXISTS CLAIM_PAYMENT; DROP TABLE IF EXISTS staging_CLAIM_PAYMENT; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_PAYMENT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_PAYMENT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CLAIM_PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CLAIM_PAYMENT USING column OPTIONS(partition_by 'CLM_PAY_ID', buckets '32',key_columns 'CLIENT_ID,CLM_PAY_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT); +CREATE TABLE IF NOT EXISTS CLAIM_PAYMENT USING column OPTIONS(partition_by 'CLM_PAY_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,CLM_PAY_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT); DROP TABLE IF EXISTS TOPIC_COMMUNICATION; DROP TABLE IF EXISTS staging_TOPIC_COMMUNICATION; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TOPIC_COMMUNICATION +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TOPIC_COMMUNICATION USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/TOPIC_COMMUNICATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TOPIC_COMMUNICATION USING column OPTIONS(partition_by 'CMCN_INQ_ID', buckets '32',key_columns ' CLIENT_ID,CMCN_INQ_ID,TPC_INQ_ID,CMCN_ID,TPC_ID' ) AS (SELECT * FROM staging_TOPIC_COMMUNICATION); +CREATE TABLE IF NOT EXISTS TOPIC_COMMUNICATION USING column OPTIONS(partition_by 'CMCN_INQ_ID',redundancy '1', buckets '32',key_columns ' CLIENT_ID,CMCN_INQ_ID,TPC_INQ_ID,CMCN_ID,TPC_ID' ) AS (SELECT * FROM staging_TOPIC_COMMUNICATION); DROP TABLE IF EXISTS CONTACT_TYPE_CONTACT; DROP TABLE IF EXISTS staging_CONTACT_TYPE_CONTACT; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CONTACT_TYPE_CONTACT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CONTACT_TYPE_CONTACT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CONTACT_TYPE_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CONTACT_TYPE_CONTACT USING column OPTIONS(partition_by 'CNTC_ID', buckets '32',key_columns 'CLIENT_ID,CNTC_ID,ORG_CNTC_TYP_ID,CNTC_TYP_CNTC_ID ' ) AS (SELECT * FROM staging_CONTACT_TYPE_CONTACT); +CREATE TABLE IF NOT EXISTS CONTACT_TYPE_CONTACT USING column OPTIONS(partition_by 'CNTC_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,CNTC_ID,ORG_CNTC_TYP_ID,CNTC_TYP_CNTC_ID ' ) AS (SELECT * FROM staging_CONTACT_TYPE_CONTACT); DROP TABLE IF EXISTS TOPIC; DROP TABLE IF EXISTS staging_TOPIC; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TOPIC +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TOPIC USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/TOPIC.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS TOPIC USING column OPTIONS(partition_by 'INQ_ID',buckets '32',key_columns 'CLIENT_ID,INQ_ID,TPC_ID ' ) AS 
(SELECT * FROM staging_TOPIC); +CREATE TABLE IF NOT EXISTS TOPIC USING column OPTIONS(partition_by 'INQ_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,INQ_ID,TPC_ID ' ) AS (SELECT * FROM staging_TOPIC); DROP TABLE IF EXISTS LINE_ADDITIONAL_DIAGNOSIS; DROP TABLE IF EXISTS staging_LINE_ADDITIONAL_DIAGNOSIS; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_LINE_ADDITIONAL_DIAGNOSIS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LINE_ADDITIONAL_DIAGNOSIS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/LINE_ADDITIONAL_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS LINE_ADDITIONAL_DIAGNOSIS USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_ADD_DIAG_ID,LN_ADD_DIAG_ID ' ) AS (SELECT * FROM staging_LINE_ADDITIONAL_DIAGNOSIS); +CREATE TABLE IF NOT EXISTS LINE_ADDITIONAL_DIAGNOSIS USING column OPTIONS(partition_by 'PRSN_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,CLM_ADD_DIAG_ID,LN_ADD_DIAG_ID ' ) AS (SELECT * FROM staging_LINE_ADDITIONAL_DIAGNOSIS); DROP TABLE IF EXISTS PROCEDURE_CODE; DROP TABLE IF EXISTS staging_PROCEDURE_CODE; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PROCEDURE_CODE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PROCEDURE_CODE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/PROCEDURE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS PROCEDURE_CODE USING column OPTIONS(partition_by 'PR_CD_ID', buckets '32',key_columns 'CLIENT_ID,PR_CD_ID ' ) AS (SELECT * FROM staging_PROCEDURE_CODE); +CREATE TABLE IF NOT EXISTS PROCEDURE_CODE USING column OPTIONS(partition_by 'PR_CD_ID', redundancy '1',buckets '32',key_columns 'CLIENT_ID,PR_CD_ID ' ) AS (SELECT * FROM staging_PROCEDURE_CODE); DROP TABLE IF EXISTS CODE_VALUE; DROP TABLE IF EXISTS 
staging_CODE_VALUE; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CODE_VALUE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CODE_VALUE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CODE_VALUE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CODE_VALUE USING column OPTIONS(partition_by 'CD_VAL_ID', buckets '32',key_columns 'CLIENT_ID,CD_VAL_ID') AS (SELECT * FROM staging_CODE_VALUE); +CREATE TABLE IF NOT EXISTS CODE_VALUE USING column OPTIONS(partition_by 'CD_VAL_ID', redundancy '1',buckets '32',key_columns 'CLIENT_ID,CD_VAL_ID') AS (SELECT * FROM staging_CODE_VALUE); DROP TABLE IF EXISTS POSTAL_ADDRESS; DROP TABLE IF EXISTS staging_POSTAL_ADDRESS; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_POSTAL_ADDRESS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_POSTAL_ADDRESS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/POSTAL_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS POSTAL_ADDRESS USING column OPTIONS(partition_by 'CNTC_ID',key_columns 'CLIENT_ID,CNTC_ID,PSTL_ADDR_ID') AS (SELECT * FROM staging_POSTAL_ADDRESS); +CREATE TABLE IF NOT EXISTS POSTAL_ADDRESS USING column OPTIONS(partition_by 'CNTC_ID',redundancy '1',key_columns 'CLIENT_ID,CNTC_ID,PSTL_ADDR_ID') AS (SELECT * FROM staging_POSTAL_ADDRESS); DROP TABLE IF EXISTS CLIENTS; DROP TABLE IF EXISTS staging_CLIENTS; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLIENTS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLIENTS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/CLIENTS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CLIENTS USING column OPTIONS(partition_by 'CLIENT_ID', buckets '32',key_columns 'CLIENT_ID ' ) AS (SELECT * FROM staging_CLIENTS); +CREATE TABLE IF NOT EXISTS 
CLIENTS USING column OPTIONS(partition_by 'CLIENT_ID', redundancy '1',buckets '32',key_columns 'CLIENT_ID ' ) AS (SELECT * FROM staging_CLIENTS); DROP TABLE IF EXISTS PERSONS; DROP TABLE IF EXISTS staging_PERSONS; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSONS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/PERSONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS PERSONS USING column OPTIONS(partition_by 'PRSN_ID', buckets '32',key_columns 'CLIENT_ID,PRSN_ID ' ) AS (SELECT * FROM staging_PERSONS); +CREATE TABLE IF NOT EXISTS PERSONS USING column OPTIONS(partition_by 'PRSN_ID', redundancy '1',buckets '32',key_columns 'CLIENT_ID,PRSN_ID ' ) AS (SELECT * FROM staging_PERSONS); DROP TABLE IF EXISTS BANK; DROP TABLE IF EXISTS staging_BANK; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BANK +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BANK USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/BANK.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS BANK USING column OPTIONS(partition_by 'BNK_ORG_ID', buckets '32',key_columns 'CLIENT_ID,BNK_ORG_ID,BNK_ID ' ) AS (SELECT * FROM staging_BANK); +CREATE TABLE IF NOT EXISTS BANK USING column OPTIONS(partition_by 'BNK_ORG_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,BNK_ORG_ID,BNK_ID ' ) AS (SELECT * FROM staging_BANK); DROP TABLE IF EXISTS BILLING_ENTITY; DROP TABLE IF EXISTS staging_BILLING_ENTITY; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BILLING_ENTITY +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/BILLING_ENTITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS 
BILLING_ENTITY USING column OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID') AS (SELECT * FROM staging_BILLING_ENTITY); +CREATE TABLE IF NOT EXISTS BILLING_ENTITY USING column OPTIONS(partition_by 'BILL_ENT_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID') AS (SELECT * FROM staging_BILLING_ENTITY); DROP TABLE IF EXISTS BENEFIT_PACKAGE; DROP TABLE IF EXISTS staging_BENEFIT_PACKAGE; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BENEFIT_PACKAGE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_PACKAGE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/BENEFIT_PACKAGE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE USING column OPTIONS(partition_by 'BENE_PKG_ID', buckets '32',key_columns 'CLIENT_ID,BENE_PKG_ID' ) AS (SELECT * FROM staging_BENEFIT_PACKAGE); +CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE USING column OPTIONS(partition_by 'BENE_PKG_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,BENE_PKG_ID' ) AS (SELECT * FROM staging_BENEFIT_PACKAGE); DROP TABLE IF EXISTS GROUPS; DROP TABLE IF EXISTS staging_GROUPS; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_GROUPS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_GROUPS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS GROUPS USING column OPTIONS(partition_by 'GRP_ID', buckets '32',key_columns 'CLIENT_ID,GRP_ID' ) AS (SELECT * FROM staging_GROUPS); +CREATE TABLE IF NOT EXISTS GROUPS USING column OPTIONS(partition_by 'GRP_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,GRP_ID' ) AS (SELECT * FROM staging_GROUPS); DROP TABLE IF EXISTS COMMUNICATION; DROP TABLE IF EXISTS staging_COMMUNICATION; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_COMMUNICATION +CREATE EXTERNAL 
TABLE IF NOT EXISTS staging_COMMUNICATION USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/COMMUNICATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS COMMUNICATION USING column OPTIONS(partition_by 'INQ_ID', buckets '32',key_columns ' CLIENT_ID,INQ_ID,CMCN_ID' ) AS (SELECT * FROM staging_COMMUNICATION); +CREATE TABLE IF NOT EXISTS COMMUNICATION USING column OPTIONS(partition_by 'INQ_ID',redundancy '1', buckets '32',key_columns ' CLIENT_ID,INQ_ID,CMCN_ID' ) AS (SELECT * FROM staging_COMMUNICATION); DROP TABLE IF EXISTS COMPLAINT; DROP TABLE IF EXISTS staging_COMPLAINT; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_COMPLAINT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_COMPLAINT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/COMPLAINT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS COMPLAINT USING column OPTIONS(partition_by 'INQ_ID',buckets '32',key_columns 'CLIENT_ID,INQ_ID,COMPLAINT_ID ' ) AS (SELECT * FROM staging_COMPLAINT); +CREATE TABLE IF NOT EXISTS COMPLAINT USING column OPTIONS(partition_by 'INQ_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,INQ_ID,COMPLAINT_ID ' ) AS (SELECT * FROM staging_COMPLAINT); DROP TABLE IF EXISTS ADJUSTMENT; DROP TABLE IF EXISTS staging_ADJUSTMENT; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_ADJUSTMENT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ADJUSTMENT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/ADJUSTMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS ADJUSTMENT USING column OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID,ADJ_ID ' ) AS (SELECT * FROM staging_ADJUSTMENT); +CREATE TABLE IF NOT EXISTS ADJUSTMENT USING column OPTIONS(partition_by 
'BILL_ENT_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,BILL_ENT_ID,ADJ_ID ' ) AS (SELECT * FROM staging_ADJUSTMENT); DROP TABLE IF EXISTS CLAIM_PAYMENT_REDUCTION; DROP TABLE IF EXISTS staging_CLAIM_PAYMENT_REDUCTION; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_PAYMENT_REDUCTION +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_PAYMENT_REDUCTION USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_PAYMENT_REDUCTION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CLAIM_PAYMENT_REDUCTION USING column OPTIONS(partition_by 'CLM_PAY_RDCTN_ID', buckets '32',key_columns 'CLIENT_ID,CLM_PAY_RDCTN_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT_REDUCTION); +CREATE TABLE IF NOT EXISTS CLAIM_PAYMENT_REDUCTION USING column OPTIONS(partition_by 'CLM_PAY_RDCTN_ID',redundancy '1', buckets '32',key_columns 'CLIENT_ID,CLM_PAY_RDCTN_ID ' ) AS (SELECT * FROM staging_CLAIM_PAYMENT_REDUCTION); DROP TABLE IF EXISTS CLAIM_REDUCTION_DETAIL; DROP TABLE IF EXISTS staging_CLAIM_REDUCTION_DETAIL; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_REDUCTION_DETAIL +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_REDUCTION_DETAIL USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_REDUCTION_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CLAIM_REDUCTION_DETAIL USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_PAY_RDCTN_ID,CLM_RDCTN_DTL_ID ' ) AS (SELECT * FROM staging_CLAIM_REDUCTION_DETAIL); +CREATE TABLE IF NOT EXISTS CLAIM_REDUCTION_DETAIL USING column OPTIONS(partition_by 'PRSN_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_PAY_RDCTN_ID,CLM_RDCTN_DTL_ID ' ) AS (SELECT * FROM staging_CLAIM_REDUCTION_DETAIL); DROP TABLE IF EXISTS CLAIM_REDUCTION_HISTORY; DROP TABLE IF EXISTS 
staging_CLAIM_REDUCTION_HISTORY; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_REDUCTION_HISTORY +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_REDUCTION_HISTORY USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_REDUCTION_HISTORY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS CLAIM_REDUCTION_HISTORY USING column OPTIONS(partition_by 'CLM_PAY_RDCTN_ID', buckets '32',key_columns 'CLIENT_ID,CLM_PAY_RDCTN_ID,CLM_RDCTN_HIST_ID ' ) AS (SELECT * FROM staging_CLAIM_REDUCTION_HISTORY); +CREATE TABLE IF NOT EXISTS CLAIM_REDUCTION_HISTORY USING column OPTIONS(partition_by 'CLM_PAY_RDCTN_ID', redundancy '1',buckets '32',key_columns 'CLIENT_ID,CLM_PAY_RDCTN_ID,CLM_RDCTN_HIST_ID ' ) AS (SELECT * FROM staging_CLAIM_REDUCTION_HISTORY); DROP TABLE IF EXISTS CLAIM_COB; DROP TABLE IF EXISTS staging_CLAIM_COB; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_COB +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_COB USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); - CREATE TABLE IF NOT EXISTS CLAIM_COB USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_COB_ID ' ) AS (SELECT * FROM staging_CLAIM_COB); + CREATE TABLE IF NOT EXISTS CLAIM_COB USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_COB_ID ' ) AS (SELECT * FROM staging_CLAIM_COB); DROP TABLE IF EXISTS CLAIM_HOSPITAL; DROP TABLE IF EXISTS staging_CLAIM_HOSPITAL; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_HOSPITAL +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_HOSPITAL USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data_20G/CLAIM_HOSPITAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); - CREATE TABLE IF NOT 
EXISTS CLAIM_HOSPITAL USING column OPTIONS(partition_by 'PRSN_ID',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_HOSP_ID ' ) AS (SELECT * FROM staging_CLAIM_HOSPITAL); + CREATE TABLE IF NOT EXISTS CLAIM_HOSPITAL USING column OPTIONS(partition_by 'PRSN_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,PRSN_ID,CLM_ID,CLM_HOSP_ID ' ) AS (SELECT * FROM staging_CLAIM_HOSPITAL); DROP TABLE IF EXISTS UM_INPATIENT; DROP TABLE IF EXISTS staging_UM_INPATIENT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UM_INPATIENT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/UM_INPATIENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS UM_INPATIENT USING column OPTIONS(partition_by 'UM_RVW_ID',buckets '32',key_columns 'CLIENT_ID,UM_RVW_ID,UM_INPT_ID ' ) AS (SELECT * FROM staging_UM_INPATIENT); +CREATE TABLE IF NOT EXISTS UM_INPATIENT USING column OPTIONS(partition_by 'UM_RVW_ID',redundancy '1',buckets '32',key_columns 'CLIENT_ID,UM_RVW_ID,UM_INPT_ID ' ) AS (SELECT * FROM staging_UM_INPATIENT); DROP TABLE IF EXISTS UM_SERVICE; DROP TABLE IF EXISTS staging_UM_SERVICE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UM_SERVICE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_SERVICE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_Cluster_Data/UM_SERVICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); -CREATE TABLE IF NOT EXISTS UM_SERVICE USING column OPTIONS(partition_by 'UM_RVW_ID',buckets '32',key_columns 'CLIENT_ID,UM_RVW_ID,UM_SERV_ID ' ) AS (SELECT * FROM staging_UM_SERVICE); +CREATE TABLE IF NOT EXISTS UM_SERVICE USING column OPTIONS(partition_by 'UM_RVW_ID',redundancy '1',buckets '32',key_columns 
'CLIENT_ID,UM_RVW_ID,UM_SERV_ID ' ) AS (SELECT * FROM staging_UM_SERVICE); DROP TABLE IF EXISTS INQUIRY; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_INQUIRY +CREATE EXTERNAL TABLE IF NOT EXISTS staging_INQUIRY USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INQUIRY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INQUIRY( INQ_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -236,7 +236,7 @@ INSERT INTO INQUIRY SELECT * FROM staging_INQUIRY; DROP TABLE IF EXISTS ORGANIZATIONS; DROP TABLE IF EXISTS staging_ORGANIZATIONS; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_ORGANIZATIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATIONS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ORGANIZATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ORGANIZATIONS(ORG_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -258,7 +258,7 @@ INSERT INTO ORGANIZATIONS SELECT * FROM staging_ORGANIZATIONS; DROP TABLE IF EXISTS UM_REVIEW; DROP TABLE IF EXISTS staging_UM_REVIEW; -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UM_REVIEW +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_REVIEW USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_REVIEW(UM_RVW_ID BIGINT NOT NULL, VER BIGINT NOT NULL, @@ -307,7 +307,7 @@ INSERT INTO UM_REVIEW SELECT * FROM staging_UM_REVIEW; DROP TABLE IF EXISTS INVOICE_DETAIL; DROP TABLE IF EXISTS staging_INVOICE_DETAIL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_INVOICE_DETAIL +CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE_DETAIL USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INVOICE_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INVOICE_DETAIL( INV_ID BIGINT NOT NULL, @@ -336,7 +336,7 @@ INSERT INTO INVOICE_DETAIL SELECT * FROM staging_INVOICE_DETAIL; DROP TABLE IF EXISTS BENEFIT_GROUP_NAME; DROP TABLE IF EXISTS staging_BENEFIT_GROUP_NAME; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BENEFIT_GROUP_NAME +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_GROUP_NAME USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_GROUP_NAME.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_GROUP_NAME( GRP_ID BIGINT NOT NULL, @@ -360,7 +360,7 @@ CREATE TABLE IF NOT EXISTS BENEFIT_GROUP_NAME( DROP TABLE IF EXISTS BENEFIT_GROUPS; DROP TABLE IF EXISTS staging_BENEFIT_GROUPS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BENEFIT_GROUPS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_GROUPS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_GROUPS( GRP_ID BIGINT NOT NULL, @@ -383,7 +383,7 @@ CREATE TABLE IF NOT EXISTS BENEFIT_GROUPS( DROP TABLE IF EXISTS BILLING_RECONCILIATION; DROP TABLE IF EXISTS staging_BILLING_RECONCILIATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BILLING_RECONCILIATION +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_RECONCILIATION USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_RECONCILIATION.dat', header 'true', inferSchema 'true', 
nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_RECONCILIATION( BILL_ENT_ID BIGINT NOT NULL, @@ -408,7 +408,7 @@ CREATE TABLE IF NOT EXISTS BILLING_RECONCILIATION( DROP TABLE IF EXISTS CHECKS; DROP TABLE IF EXISTS staging_CHECKS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CHECKS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHECKS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CHECKS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CHECKS( CLM_PAY_ID BIGINT NOT NULL, @@ -434,7 +434,7 @@ CREATE TABLE IF NOT EXISTS CHECKS( DROP TABLE IF EXISTS CLIENT_REFERENCE_DATA; DROP TABLE IF EXISTS staging_CLIENT_REFERENCE_DATA; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLIENT_REFERENCE_DATA +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLIENT_REFERENCE_DATA USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CLIENT_REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLIENT_REFERENCE_DATA( CLIENT_REF_DTA_ID BIGINT NOT NULL, @@ -457,7 +457,7 @@ CREATE TABLE IF NOT EXISTS CLIENT_REFERENCE_DATA( DROP TABLE IF EXISTS COB_CLAIM_DIAGNOSIS; DROP TABLE IF EXISTS staging_COB_CLAIM_DIAGNOSIS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_COB_CLAIM_DIAGNOSIS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_COB_CLAIM_DIAGNOSIS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/COB_CLAIM_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS COB_CLAIM_DIAGNOSIS( PRSN_ID BIGINT NOT NULL, @@ -479,7 +479,7 @@ CREATE 
TABLE IF NOT EXISTS COB_CLAIM_DIAGNOSIS( DROP TABLE IF EXISTS COB_ORGANIZATION_PERSON; DROP TABLE IF EXISTS staging_COB_ORGANIZATION_PERSON; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_COB_ORGANIZATION_PERSON +CREATE EXTERNAL TABLE IF NOT EXISTS staging_COB_ORGANIZATION_PERSON USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/COB_ORGANIZATION_PERSON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS COB_ORGANIZATION_PERSON( PRSN_ID BIGINT NOT NULL, @@ -512,7 +512,7 @@ INSERT INTO COB_ORGANIZATION_PERSON SELECT * FROM staging_COB_ORGANIZATION_PERSO DROP TABLE IF EXISTS DIAGNOSIS_CODE; DROP TABLE IF EXISTS staging_DIAGNOSIS_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_DIAGNOSIS_CODE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_DIAGNOSIS_CODE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/DIAGNOSIS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS DIAGNOSIS_CODE( DIAG_CD_ID BIGINT NOT NULL, @@ -538,7 +538,7 @@ CREATE TABLE IF NOT EXISTS DIAGNOSIS_CODE( DROP TABLE IF EXISTS ELECTRONIC_ADDRESS; DROP TABLE IF EXISTS staging_ELECTRONIC_ADDRESS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_ELECTRONIC_ADDRESS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ELECTRONIC_ADDRESS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ELECTRONIC_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ELECTRONIC_ADDRESS( CNTC_ID BIGINT NOT NULL, @@ -559,7 +559,7 @@ CREATE TABLE IF NOT EXISTS ELECTRONIC_ADDRESS( DROP TABLE IF EXISTS 
ENTITY_BANK_ACCOUNT; DROP TABLE IF EXISTS staging_ENTITY_BANK_ACCOUNT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_ENTITY_BANK_ACCOUNT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ENTITY_BANK_ACCOUNT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ENTITY_BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ENTITY_BANK_ACCOUNT( bill_ent_id bigint NOT NULL, @@ -580,7 +580,7 @@ CREATE TABLE IF NOT EXISTS ENTITY_BANK_ACCOUNT( DROP TABLE IF EXISTS ETL_METADATA; DROP TABLE IF EXISTS staging_ETL_METADATA; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_ETL_METADATA +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ETL_METADATA USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ETL_METADATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ETL_METADATA( MAIN_TBL VARCHAR(15) NOT NULL, @@ -605,7 +605,7 @@ CREATE TABLE IF NOT EXISTS ETL_METADATA( DROP TABLE IF EXISTS EXHIBIT; DROP TABLE IF EXISTS staging_EXHIBIT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_EXHIBIT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXHIBIT( EXHIBIT_ID BIGINT NOT NULL, @@ -623,7 +623,7 @@ CREATE TABLE IF NOT EXISTS EXHIBIT( DROP TABLE IF EXISTS EXPLANATION_CODE; DROP TABLE IF EXISTS staging_EXPLANATION_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_EXPLANATION_CODE +CREATE EXTERNAL 
TABLE IF NOT EXISTS staging_EXPLANATION_CODE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXPLANATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXPLANATION_CODE( EXPLAIN_CD_ID BIGINT NOT NULL, @@ -652,7 +652,7 @@ CREATE TABLE IF NOT EXISTS EXPLANATION_CODE( DROP TABLE IF EXISTS FILE_TRANSFER_RUN; DROP TABLE IF EXISTS staging_FILE_TRANSFER_RUN; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_FILE_TRANSFER_RUN +CREATE EXTERNAL TABLE IF NOT EXISTS staging_FILE_TRANSFER_RUN USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FILE_TRANSFER_RUN.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FILE_TRANSFER_RUN( FL_TRANS_RUN_ID BIGINT NOT NULL, @@ -686,7 +686,7 @@ CREATE TABLE IF NOT EXISTS FILE_TRANSFER_RUN( DROP TABLE IF EXISTS FILE_TRANSFER_STAT; DROP TABLE IF EXISTS staging_FILE_TRANSFER_STAT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_FILE_TRANSFER_STAT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_FILE_TRANSFER_STAT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FILE_TRANSFER_STAT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FILE_TRANSFER_STAT( FL_TRANS_STAT_ID BIGINT NOT NULL, @@ -725,7 +725,7 @@ CREATE TABLE IF NOT EXISTS FILE_TRANSFER_STAT( DROP TABLE IF EXISTS GENERAL_LEDGER; DROP TABLE IF EXISTS staging_GENERAL_LEDGER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_GENERAL_LEDGER +CREATE EXTERNAL TABLE IF NOT EXISTS staging_GENERAL_LEDGER USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/GENERAL_LEDGER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS GENERAL_LEDGER( BILL_ENT_ID BIGINT NOT NULL, @@ -752,7 +752,7 @@ CREATE TABLE IF NOT EXISTS GENERAL_LEDGER( DROP TABLE IF EXISTS GROUP_RELATION; DROP TABLE IF EXISTS staging_GROUP_RELATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_GROUP_RELATION +CREATE EXTERNAL TABLE IF NOT EXISTS staging_GROUP_RELATION USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/GROUP_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS GROUP_RELATION( GRP_ID BIGINT NOT NULL, @@ -772,7 +772,7 @@ CREATE TABLE IF NOT EXISTS GROUP_RELATION( DROP TABLE IF EXISTS LEP_APPEAL; DROP TABLE IF EXISTS staging_LEP_APPEAL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_LEP_APPEAL +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LEP_APPEAL USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LEP_APPEAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LEP_APPEAL( PRSN_ID BIGINT NOT NULL, @@ -794,7 +794,7 @@ CREATE TABLE IF NOT EXISTS LEP_APPEAL( DROP TABLE IF EXISTS LETTER; DROP TABLE IF EXISTS staging_LETTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_LETTER +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER( PRSN_ID BIGINT NOT NULL, @@ -812,7 +812,7 @@ CREATE TABLE IF NOT EXISTS 
LETTER( DROP TABLE IF EXISTS LINE_DISALLOW_EXPLANATION; DROP TABLE IF EXISTS staging_BLINE_DISALLOW_EXPLANATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_LINE_DISALLOW_EXPLANATION +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LINE_DISALLOW_EXPLANATION USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LINE_DISALLOW_EXPLANATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LINE_DISALLOW_EXPLANATION( PRSN_ID BIGINT NOT NULL, @@ -835,7 +835,7 @@ CREATE TABLE IF NOT EXISTS LINE_DISALLOW_EXPLANATION( DROP TABLE IF EXISTS LINE_PROCEDURE_MODIFIER; DROP TABLE IF EXISTS staging_LINE_PROCEDURE_MODIFIER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_LINE_PROCEDURE_MODIFIER +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LINE_PROCEDURE_MODIFIER USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LINE_PROCEDURE_MODIFIER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LINE_PROCEDURE_MODIFIER( PRSN_ID BIGINT NOT NULL, @@ -857,7 +857,7 @@ CREATE TABLE IF NOT EXISTS LINE_PROCEDURE_MODIFIER( DROP TABLE IF EXISTS MARX_CALENDAR; DROP TABLE IF EXISTS staging_MARX_CALENDAR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_MARX_CALENDAR +CREATE EXTERNAL TABLE IF NOT EXISTS staging_MARX_CALENDAR USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MARX_CALENDAR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MARX_CALENDAR( MARX_CAL_ID BIGINT NOT NULL, @@ -879,7 +879,7 @@ CREATE TABLE IF NOT EXISTS MARX_CALENDAR( DROP TABLE IF EXISTS NOTE; DROP TABLE IF EXISTS 
staging_NOTE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_NOTE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_NOTE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/NOTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS NOTE( INQ_ID BIGINT NOT NULL, @@ -904,7 +904,7 @@ CREATE TABLE IF NOT EXISTS NOTE( DROP TABLE IF EXISTS ORGANIZATION_CONTACT_TYPE; DROP TABLE IF EXISTS staging_ORGANIZATION_CONTACT_TYPE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_ORGANIZATION_CONTACT_TYPE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATION_CONTACT_TYPE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ORGANIZATION_CONTACT_TYPE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ORGANIZATION_CONTACT_TYPE( ORG_ID BIGINT NOT NULL, @@ -923,7 +923,7 @@ CREATE TABLE IF NOT EXISTS ORGANIZATION_CONTACT_TYPE( DROP TABLE IF EXISTS PAYMENT; DROP TABLE IF EXISTS staging_PAYMENT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PAYMENT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PAYMENT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PAYMENT( BILL_ENT_ID BIGINT NOT NULL, @@ -951,7 +951,7 @@ CREATE TABLE IF NOT EXISTS PAYMENT( DROP TABLE IF EXISTS PERSON_COB; DROP TABLE IF EXISTS staging_PERSON_COB; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_COB +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_COB USING com.databricks.spark.csv 
OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_COB( PRSN_ID BIGINT NOT NULL, @@ -1002,7 +1002,7 @@ CREATE TABLE IF NOT EXISTS PERSON_COB( DROP TABLE IF EXISTS PERSON_CODE; DROP TABLE IF EXISTS staging_PERSON_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_CODE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_CODE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_CODE( PRSN_ID BIGINT NOT NULL, @@ -1023,7 +1023,7 @@ CREATE TABLE IF NOT EXISTS PERSON_CODE( DROP TABLE IF EXISTS PERSON_LEP_EVENT; DROP TABLE IF EXISTS staging_PERSON_LEP_EVENT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_LEP_EVENT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_EVENT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_LEP_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_LEP_EVENT( PRSN_ID BIGINT NOT NULL, @@ -1045,7 +1045,7 @@ CREATE TABLE IF NOT EXISTS PERSON_LEP_EVENT( DROP TABLE IF EXISTS PERSON_LEP_PROFILE; DROP TABLE IF EXISTS staging_PERSON_LEP_PROFILE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_LEP_PROFILE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_PROFILE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_LEP_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS 
PERSON_LEP_PROFILE( PRSN_ID BIGINT NOT NULL, @@ -1065,7 +1065,7 @@ CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE( DROP TABLE IF EXISTS PERSON_ORGANIZATION_RELATION; DROP TABLE IF EXISTS staging_PERSON_ORGANIZATION_RELATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_ORGANIZATION_RELATION +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ORGANIZATION_RELATION USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_ORGANIZATION_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_ORGANIZATION_RELATION( PRSN_ID BIGINT NOT NULL, @@ -1087,7 +1087,7 @@ CREATE TABLE IF NOT EXISTS PERSON_ORGANIZATION_RELATION( DROP TABLE IF EXISTS PERSON_PAYMENT_OPTIONS; DROP TABLE IF EXISTS staging_PERSON_PAYMENT_OPTIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_PAYMENT_OPTIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_PAYMENT_OPTIONS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_PAYMENT_OPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_PAYMENT_OPTIONS( PRSN_ID BIGINT NOT NULL, @@ -1109,7 +1109,7 @@ CREATE TABLE IF NOT EXISTS PERSON_PAYMENT_OPTIONS( DROP TABLE IF EXISTS PERSON_RELATION; DROP TABLE IF EXISTS staging_PERSON_RELATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_RELATION +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_RELATION USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_RELATION( PRSN_ID 
BIGINT NOT NULL, @@ -1131,7 +1131,7 @@ CREATE TABLE IF NOT EXISTS PERSON_RELATION( DROP TABLE IF EXISTS PERSON_ROLE; DROP TABLE IF EXISTS staging_PERSON_ROLE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_ROLE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ROLE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_ROLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_ROLE( PRSN_ID BIGINT NOT NULL, @@ -1152,7 +1152,7 @@ CREATE TABLE IF NOT EXISTS PERSON_ROLE( DROP TABLE IF EXISTS PERSON_SUBSIDY_PROFILE; DROP TABLE IF EXISTS staging_PERSON_SUBSIDY_PROFILE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_SUBSIDY_PROFILE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_SUBSIDY_PROFILE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_SUBSIDY_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_SUBSIDY_PROFILE( PRSN_ID BIGINT NOT NULL, @@ -1175,7 +1175,7 @@ CREATE TABLE IF NOT EXISTS PERSON_SUBSIDY_PROFILE( DROP TABLE IF EXISTS PERSON_WORK_ITEM; DROP TABLE IF EXISTS staging_PERSON_WORK_ITEM; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_WORK_ITEM +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_WORK_ITEM USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_WORK_ITEM( PRSN_ID BIGINT NOT NULL, @@ -1198,7 +1198,7 @@ CREATE TABLE IF NOT EXISTS PERSON_WORK_ITEM( DROP TABLE IF EXISTS PHONE; DROP TABLE IF EXISTS 
staging_PHONE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PHONE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PHONE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PHONE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PHONE( CNTC_ID BIGINT NOT NULL, @@ -1220,7 +1220,7 @@ CREATE TABLE IF NOT EXISTS PHONE( DROP TABLE IF EXISTS PLAN_MEMBERSHIPS; DROP TABLE IF EXISTS staging_PLAN_MEMBERSHIPS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PLAN_MEMBERSHIPS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PLAN_MEMBERSHIPS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PLAN_MEMBERSHIPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PLAN_MEMBERSHIPS( PRSN_ID BIGINT NOT NULL, @@ -1247,7 +1247,7 @@ CREATE TABLE IF NOT EXISTS PLAN_MEMBERSHIPS( DROP TABLE IF EXISTS POS_CODE; DROP TABLE IF EXISTS staging_POS_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_POS_CODE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_POS_CODE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/POS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS POS_CODE( POS_CD_ID BIGINT NOT NULL, @@ -1269,7 +1269,7 @@ CREATE TABLE IF NOT EXISTS POS_CODE( DROP TABLE IF EXISTS PREMIUM; DROP TABLE IF EXISTS staging_PREMIUM; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PREMIUM +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM( GRP_ID BIGINT NOT NULL, @@ -1293,7 +1293,7 @@ CREATE TABLE IF NOT EXISTS PREMIUM( DROP TABLE IF EXISTS REFERENCE_DATA; DROP TABLE IF EXISTS staging_REFERENCE_DATAY; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_REFERENCE_DATA +CREATE EXTERNAL TABLE IF NOT EXISTS staging_REFERENCE_DATA USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS REFERENCE_DATA( REF_DTA_ID BIGINT NOT NULL, @@ -1310,7 +1310,7 @@ CREATE TABLE IF NOT EXISTS REFERENCE_DATA( DROP TABLE IF EXISTS RETURNED_MAIL; DROP TABLE IF EXISTS staging_RETURNED_MAIL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_RETURNED_MAIL +CREATE EXTERNAL TABLE IF NOT EXISTS staging_RETURNED_MAIL USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/RETURNED_MAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS RETURNED_MAIL( RTRN_MAIL_ID BIGINT NOT NULL, @@ -1332,7 +1332,7 @@ CREATE TABLE IF NOT EXISTS RETURNED_MAIL( DROP TABLE IF EXISTS UM_ACTIVITY; DROP TABLE IF EXISTS staging_UM_ACTIVITY; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UM_ACTIVITY +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_ACTIVITY USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_ACTIVITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_ACTIVITY( UM_RVW_ID BIGINT NOT NULL, @@ 
-1361,7 +1361,7 @@ CREATE TABLE IF NOT EXISTS UM_ACTIVITY( DROP TABLE IF EXISTS UM_DIAGNOSIS_LINE; DROP TABLE IF EXISTS staging_UM_DIAGNOSIS_LINE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UM_DIAGNOSIS_LINE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_DIAGNOSIS_LINE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_DIAGNOSIS_LINE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_LINE( UM_RVW_ID BIGINT NOT NULL, @@ -1384,7 +1384,7 @@ CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_LINE( DROP TABLE IF EXISTS UM_DIAGNOSIS_SET; DROP TABLE IF EXISTS staging_UM_DIAGNOSIS_SET; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UM_DIAGNOSIS_SET +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_DIAGNOSIS_SET USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_DIAGNOSIS_SET.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_SET( UM_RVW_ID BIGINT NOT NULL, @@ -1405,7 +1405,7 @@ CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_SET( DROP TABLE IF EXISTS BENEFIT_PACKAGE_RELATION; DROP TABLE IF EXISTS staging_BENEFIT_PACKAGE_RELATION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BENEFIT_PACKAGE_RELATION +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_PACKAGE_RELATION USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_PACKAGE_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_RELATION( BENE_PKG_ID BIGINT NOT NULL, @@ -1425,7 +1425,7 @@ CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_RELATION( DROP TABLE IF EXISTS 
BILLING_ENTITY_CONTACT; DROP TABLE IF EXISTS staging_BILLING_ENTITY_CONTACT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BILLING_ENTITY_CONTACT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_CONTACT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_ENTITY_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_ENTITY_CONTACT( BILL_ENT_ID BIGINT NOT NULL, @@ -1447,7 +1447,7 @@ CREATE TABLE IF NOT EXISTS BILLING_ENTITY_CONTACT( DROP TABLE IF EXISTS BILLING_ENTITY_DETAIL; DROP TABLE IF EXISTS staging_BILLING_ENTITY_DETAIL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BILLING_ENTITY_DETAIL +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_DETAIL USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_ENTITY_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_ENTITY_DETAIL( BILL_ENT_ID BIGINT NOT NULL, @@ -1467,7 +1467,7 @@ CREATE TABLE IF NOT EXISTS BILLING_ENTITY_DETAIL( DROP TABLE IF EXISTS CHECK_STATUS; DROP TABLE IF EXISTS staging_CHECK_STATUS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CHECK_STATUS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHECK_STATUS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CHECK_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CHECK_STATUS( CLM_PAY_ID BIGINT NOT NULL, @@ -1492,7 +1492,7 @@ CREATE TABLE IF NOT EXISTS CHECK_STATUS( DROP TABLE IF EXISTS EXHIBIT_GROUP; DROP TABLE IF EXISTS staging_EXHIBIT_GROUP; ----- CREATE TEMPORARY STAGING 
TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_EXHIBIT_GROUP +CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT_GROUP USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXHIBIT_GROUP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP( EXHIBIT_GRP_ID BIGINT NOT NULL, @@ -1515,7 +1515,7 @@ CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP( DROP TABLE IF EXISTS EXHIBIT_GROUP_EXHIBIT; DROP TABLE IF EXISTS staging_EXHIBIT_GROUP_EXHIBIT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_EXHIBIT_GROUP_EXHIBIT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT_GROUP_EXHIBIT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXHIBIT_GROUP_EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP_EXHIBIT( EXHIBIT_ID BIGINT NOT NULL, @@ -1537,7 +1537,7 @@ CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP_EXHIBIT( DROP TABLE IF EXISTS GENERAL_LEDGER_MAP; DROP TABLE IF EXISTS staging_GENERAL_LEDGER_MAP; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_GENERAL_LEDGER_MAP +CREATE EXTERNAL TABLE IF NOT EXISTS staging_GENERAL_LEDGER_MAP USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/GENERAL_LEDGER_MAP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS GENERAL_LEDGER_MAP( GL_MAP_ID BIGINT NOT NULL, @@ -1558,7 +1558,7 @@ CREATE TABLE IF NOT EXISTS GENERAL_LEDGER_MAP( DROP TABLE IF EXISTS LEP_APPEAL_DECISION; DROP TABLE IF EXISTS staging_LEP_APPEAL_DECISION; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE 
staging_LEP_APPEAL_DECISION +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LEP_APPEAL_DECISION USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LEP_APPEAL_DECISION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LEP_APPEAL_DECISION( PRSN_ID BIGINT NOT NULL, @@ -1581,7 +1581,7 @@ CREATE TABLE IF NOT EXISTS LEP_APPEAL_DECISION( DROP TABLE IF EXISTS LETTER_DETAIL; DROP TABLE IF EXISTS staging_LETTER_DETAIL; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_LETTER_DETAIL +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_DETAIL USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_DETAIL( PRSN_ID BIGINT NOT NULL, @@ -1600,7 +1600,7 @@ CREATE TABLE IF NOT EXISTS LETTER_DETAIL( DROP TABLE IF EXISTS LETTER_JOB; DROP TABLE IF EXISTS staging_LETTER_JOB; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_LETTER_JOB +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_JOB USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_JOB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_JOB( PRSN_ID BIGINT NOT NULL, @@ -1625,7 +1625,7 @@ CREATE TABLE IF NOT EXISTS LETTER_JOB( DROP TABLE IF EXISTS LETTER_REGISTER; DROP TABLE IF EXISTS staging_LETTER_REGISTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_LETTER_REGISTER +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_REGISTER USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_REGISTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_REGISTER( PRSN_ID BIGINT NOT NULL, @@ -1649,7 +1649,7 @@ CREATE TABLE IF NOT EXISTS LETTER_REGISTER( DROP TABLE IF EXISTS LETTER_WORK_ITEM; DROP TABLE IF EXISTS staging_LETTER_WORK_ITEM; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_LETTER_WORK_ITEM +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_WORK_ITEM USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS LETTER_WORK_ITEM( PRSN_ID BIGINT NOT NULL, @@ -1670,7 +1670,7 @@ CREATE TABLE IF NOT EXISTS LETTER_WORK_ITEM( DROP TABLE IF EXISTS PERSON_EVENT_STATUS; DROP TABLE IF EXISTS staging_PERSON_EVENT_STATUS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_EVENT_STATUS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT_STATUS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_EVENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS( PRSN_EVNT_ID BIGINT NOT NULL, @@ -1690,7 +1690,7 @@ CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS( DROP TABLE IF EXISTS PERSON_EVENT_STATUS_REASON; DROP TABLE IF EXISTS staging_PERSON_EVENT_STATUS_REASON; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_EVENT_STATUS_REASON +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT_STATUS_REASON USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_EVENT_STATUS_REASON.dat', 
header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS_REASON( PRSN_EVNT_STAT_ID BIGINT NOT NULL, @@ -1709,7 +1709,7 @@ CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS_REASON( DROP TABLE IF EXISTS PERSON_LEP_PROFILE_RECORD; DROP TABLE IF EXISTS staging_PERSON_LEP_PROFILE_RECORD; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_LEP_PROFILE_RECORD +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_PROFILE_RECORD USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_LEP_PROFILE_RECORD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE_RECORD( PRSN_ID BIGINT NOT NULL, @@ -1740,7 +1740,7 @@ CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE_RECORD( DROP TABLE IF EXISTS PREMIUM_CATEGORY; DROP TABLE IF EXISTS staging_PREMIUM_CATEGORY; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PREMIUM_CATEGORY +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_CATEGORY USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM_CATEGORY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM_CATEGORY( GRP_ID BIGINT NOT NULL, @@ -1761,7 +1761,7 @@ CREATE TABLE IF NOT EXISTS PREMIUM_CATEGORY( DROP TABLE IF EXISTS PREMIUM_PART; DROP TABLE IF EXISTS staging_PREMIUM_PART; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PREMIUM_PART +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_PART USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM_PART.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); 
CREATE TABLE IF NOT EXISTS PREMIUM_PART( GRP_ID BIGINT NOT NULL, @@ -1784,7 +1784,7 @@ CREATE TABLE IF NOT EXISTS PREMIUM_PART( DROP TABLE IF EXISTS PREMIUM_TABLE; DROP TABLE IF EXISTS staging_PREMIUM_TABLE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PREMIUM_TABLE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_TABLE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM_TABLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PREMIUM_TABLE( GRP_ID BIGINT NOT NULL, @@ -1806,7 +1806,7 @@ CREATE TABLE IF NOT EXISTS PREMIUM_TABLE( DROP TABLE IF EXISTS UM_INPATIENT_REVIEW; DROP TABLE IF EXISTS staging_UM_INPATIENT_REVIEW; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UM_INPATIENT_REVIEW +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_REVIEW USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_INPATIENT_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_INPATIENT_REVIEW( UM_RVW_ID BIGINT NOT NULL, @@ -1835,7 +1835,7 @@ CREATE TABLE IF NOT EXISTS UM_INPATIENT_REVIEW( DROP TABLE IF EXISTS UM_INPATIENT_STATUS; DROP TABLE IF EXISTS staging_UM_INPATIENT_STATUS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UM_INPATIENT_STATUS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_STATUS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_INPATIENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_INPATIENT_STATUS( UM_RVW_ID BIGINT NOT NULL, @@ -1859,7 +1859,7 @@ CREATE TABLE IF NOT EXISTS UM_INPATIENT_STATUS( 
DROP TABLE IF EXISTS UM_SERVICE_OVERRIDE; DROP TABLE IF EXISTS staging_UM_SERVICE_OVERRIDE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UM_SERVICE_OVERRIDE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_SERVICE_OVERRIDE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_SERVICE_OVERRIDE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_SERVICE_OVERRIDE( UM_RVW_ID BIGINT NOT NULL, @@ -1886,7 +1886,7 @@ INSERT INTO UM_SERVICE_OVERRIDE SELECT * FROM staging_UM_SERVICE_OVERRIDE; DROP TABLE IF EXISTS BANK_ACCOUNT; DROP TABLE IF EXISTS staging_BANK_ACCOUNT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BANK_ACCOUNT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BANK_ACCOUNT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BANK_ACCOUNT( BNK_ORG_ID BIGINT NOT NULL, @@ -1908,7 +1908,7 @@ CREATE TABLE IF NOT EXISTS BANK_ACCOUNT( DROP TABLE IF EXISTS UM_INPATIENT_STAY_LENGTH; DROP TABLE IF EXISTS staging_UM_INPATIENT_STAY_LENGTH; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UM_INPATIENT_STAY_LENGTH +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_STAY_LENGTH USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_INPATIENT_STAY_LENGTH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UM_INPATIENT_STAY_LENGTH( UM_RVW_ID BIGINT NOT NULL, @@ -1943,7 +1943,7 @@ CREATE TABLE IF NOT EXISTS UM_INPATIENT_STAY_LENGTH( DROP TABLE IF EXISTS REVENUE_CODE; DROP TABLE IF EXISTS 
staging_REVENUE_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_REVENUE_CODE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_REVENUE_CODE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/REVENUE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS REVENUE_CODE( REV_CD_ID BIGINT NOT NULL, @@ -1966,7 +1966,7 @@ CREATE TABLE IF NOT EXISTS REVENUE_CODE( DROP TABLE IF EXISTS SERVICE_CODE; DROP TABLE IF EXISTS staging_SERVICE_CODE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_SERVICE_CODE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICE_CODE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/SERVICE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS SERVICE_CODE( SERV_CD_ID BIGINT NOT NULL, @@ -1987,7 +1987,7 @@ CREATE TABLE IF NOT EXISTS SERVICE_CODE( DROP TABLE IF EXISTS AGREEMENT; DROP TABLE IF EXISTS staging_AGREEMENT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_AGREEMENT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_AGREEMENT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/AGREEMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS AGREEMENT( AGREE_ID BIGINT NOT NULL, @@ -2003,12 +2003,12 @@ CREATE TABLE IF NOT EXISTS AGREEMENT( SRC_SYS_REC_ID VARCHAR(15), PRIMARY KEY (CLIENT_ID,AGREE_ID) ) USING row OPTIONS(partition_by 'AGREE_ID', buckets '32',redundancy '1'); - INSERT INTO AGREEMENT SELECT * FROM staging_AGREEMENT); + INSERT INTO AGREEMENT SELECT * FROM staging_AGREEMENT; DROP TABLE IF EXISTS ORGANIZATION_EVENT; DROP TABLE 
IF EXISTS staging_ORGANIZATION_EVENT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_ORGANIZATION_EVENT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATION_EVENT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ORGANIZATION_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS ORGANIZATION_EVENT( ORG_ID BIGINT NOT NULL, @@ -2030,7 +2030,7 @@ CREATE TABLE IF NOT EXISTS ORGANIZATION_EVENT( DROP TABLE IF EXISTS FDI_TX_IDCARD; DROP TABLE IF EXISTS staging_FDI_TX_IDCARD; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_FDI_TX_IDCARD +CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_TX_IDCARD USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FDI_TX_IDCARD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FDI_TX_IDCARD ( @@ -2069,7 +2069,7 @@ CREATE TABLE IF NOT EXISTS FDI_TX_IDCARD DROP TABLE IF EXISTS FDI_TX_LETTER; DROP TABLE IF EXISTS staging_FDI_TX_LETTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_FDI_TX_LETTER +CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_TX_LETTER USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FDI_TX_LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FDI_TX_LETTER ( @@ -2105,7 +2105,7 @@ CREATE TABLE IF NOT EXISTS FDI_TX_LETTER DROP TABLE IF EXISTS BENEFIT_PACKAGE_ATTRIBUTE; DROP TABLE IF EXISTS staging_BENEFIT_PACKAGE_ATTRIBUTE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BENEFIT_PACKAGE_ATTRIBUTE +CREATE EXTERNAL TABLE IF NOT EXISTS 
staging_BENEFIT_PACKAGE_ATTRIBUTE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_PACKAGE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_ATTRIBUTE( BENE_PKG_ID BIGINT NOT NULL, @@ -2121,12 +2121,12 @@ CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_ATTRIBUTE( SRC_SYS_REC_ID VARCHAR(15), PRIMARY KEY (CLIENT_ID,BENE_PKG_ID,BENE_PKG_ATTR_ID) ) USING row OPTIONS(partition_by 'BENE_PKG_ID', buckets '32',redundancy '1'); - INSERT INTO BENEFIT_PACKAGE_ATTRIBUTE SELECT * FROM staging_BENEFIT_PACKAGE_ATTRIBUTE); + INSERT INTO BENEFIT_PACKAGE_ATTRIBUTE SELECT * FROM staging_BENEFIT_PACKAGE_ATTRIBUTE; DROP TABLE IF EXISTS BILLING_ENTITY_SCHEDULE; DROP TABLE IF EXISTS staging_BILLING_ENTITY_SCHEDULE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BILLING_ENTITY_SCHEDULE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_SCHEDULE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_ENTITY_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_ENTITY_SCHEDULE( BILL_ENT_ID BIGINT NOT NULL, @@ -2149,7 +2149,7 @@ CREATE TABLE IF NOT EXISTS BILLING_ENTITY_SCHEDULE( DROP TABLE IF EXISTS BILLING_SCHEDULE; DROP TABLE IF EXISTS staging_BILLING_SCHEDULE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BILLING_SCHEDULE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_SCHEDULE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_SCHEDULE( BILL_SCHD_ID BIGINT NOT NULL, @@ -2170,7 +2170,7 @@ CREATE TABLE 
IF NOT EXISTS BILLING_SCHEDULE( DROP TABLE IF EXISTS BILLING_SOURCE; DROP TABLE IF EXISTS staging_BILLING_SOURCE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_BILLING_SOURCE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_SOURCE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_SOURCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS BILLING_SOURCE( BILL_ENT_ID BIGINT NOT NULL, @@ -2189,7 +2189,7 @@ CREATE TABLE IF NOT EXISTS BILLING_SOURCE( DROP TABLE IF EXISTS CHARGE_ITEM; DROP TABLE IF EXISTS staging_CHARGE_ITEM; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CHARGE_ITEM +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHARGE_ITEM USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CHARGE_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CHARGE_ITEM( BILL_ENT_ID BIGINT NOT NULL, @@ -2215,7 +2215,7 @@ CREATE TABLE IF NOT EXISTS CHARGE_ITEM( DROP TABLE IF EXISTS CLAIM_COSHARE_TRACKING; DROP TABLE IF EXISTS staging_CLAIM_COSHARE_TRACKING; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_COSHARE_TRACKING +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_COSHARE_TRACKING USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CLAIM_COSHARE_TRACKING.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_COSHARE_TRACKING( PRSN_ID BIGINT NOT NULL, @@ -2244,7 +2244,7 @@ CREATE TABLE IF NOT EXISTS CLAIM_COSHARE_TRACKING( DROP TABLE IF EXISTS CLAIM_LINE_ATTRIBUTE; DROP TABLE IF EXISTS staging_CLAIM_LINE_ATTRIBUTE; ----- CREATE TEMPORARY 
STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_CLAIM_LINE_ATTRIBUTE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_LINE_ATTRIBUTE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CLAIM_LINE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS CLAIM_LINE_ATTRIBUTE( PRSN_ID BIGINT NOT NULL, @@ -2269,7 +2269,7 @@ CREATE TABLE IF NOT EXISTS CLAIM_LINE_ATTRIBUTE( DROP TABLE IF EXISTS FDI_CORRESPONDENCE; DROP TABLE IF EXISTS staging_FDI_CORRESPONDENCE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_FDI_CORRESPONDENCE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_CORRESPONDENCE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FDI_CORRESPONDENCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS FDI_CORRESPONDENCE ( @@ -2292,7 +2292,7 @@ CREATE TABLE IF NOT EXISTS FDI_CORRESPONDENCE DROP TABLE IF EXISTS INVOICE; DROP TABLE IF EXISTS staging_INVOICE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_INVOICE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INVOICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INVOICE( BILL_ENT_ID BIGINT NOT NULL, @@ -2329,7 +2329,7 @@ CREATE TABLE IF NOT EXISTS INVOICE( DROP TABLE IF EXISTS INVOICE_STATUS; DROP TABLE IF EXISTS staging_INVOICE_STATUS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_INVOICE_STATUS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE_STATUS USING com.databricks.spark.csv OPTIONS 
(path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INVOICE_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS INVOICE_STATUS( INV_ID BIGINT NOT NULL, @@ -2350,7 +2350,7 @@ CREATE TABLE IF NOT EXISTS INVOICE_STATUS( DROP TABLE IF EXISTS MOOP_BALANCE_EXCEPTIONS; DROP TABLE IF EXISTS staging_MOOP_BALANCE_EXCEPTIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_MOOP_BALANCE_EXCEPTIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_BALANCE_EXCEPTIONS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MOOP_BALANCE_EXCEPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MOOP_BALANCE_EXCEPTIONS( CLIENT_ID INTEGER NOT NULL, @@ -2374,7 +2374,7 @@ CREATE TABLE IF NOT EXISTS MOOP_BALANCE_EXCEPTIONS( DROP TABLE IF EXISTS MOOP_BALANCE; DROP TABLE IF EXISTS staging_MOOP_BALANCE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_MOOP_BALANCE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_BALANCE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MOOP_BALANCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MOOP_BALANCE( PRSN_ID BIGINT NOT NULL, @@ -2400,7 +2400,7 @@ CREATE TABLE IF NOT EXISTS MOOP_BALANCE( DROP TABLE IF EXISTS MOOP_ACCUMULATOR; DROP TABLE IF EXISTS staging_MOOP_ACCUMULATOR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_MOOP_ACCUMULATOR +CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_ACCUMULATOR USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MOOP_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', 
maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS MOOP_ACCUMULATOR ( SUPPL_ID VARCHAR(15) NOT NULL, @@ -2422,7 +2422,7 @@ SUPPL_ID VARCHAR(15) NOT NULL, DROP TABLE IF EXISTS PERSON_ACCUMULATOR; DROP TABLE IF EXISTS staging_PERSON_ACCUMULATOR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PERSON_ACCUMULATOR +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ACCUMULATOR USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PERSON_ACCUMULATOR( PRSN_ID BIGINT NOT NULL, @@ -2453,7 +2453,7 @@ CREATE TABLE IF NOT EXISTS PERSON_ACCUMULATOR( DROP TABLE IF EXISTS PROCEDURE_PRICE; DROP TABLE IF EXISTS staging_PROCEDURE_PRICE; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_PROCEDURE_PRICE +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PROCEDURE_PRICE USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PROCEDURE_PRICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS PROCEDURE_PRICE( PR_CD_ID BIGINT NOT NULL, @@ -2477,7 +2477,7 @@ CREATE TABLE IF NOT EXISTS PROCEDURE_PRICE( DROP TABLE IF EXISTS RECEIPT; DROP TABLE IF EXISTS staging_RECEIPT; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_RECEIPT +CREATE EXTERNAL TABLE IF NOT EXISTS staging_RECEIPT USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/RECEIPT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS RECEIPT( BILL_ENT_ID BIGINT NOT NULL, @@ -2522,7 +2522,7 @@ CREATE TABLE IF NOT EXISTS RECEIPT( DROP TABLE IF EXISTS 
TMGSERVICES_PLAN_CODE_CONFIG; DROP TABLE IF EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TMGSERVICES_PLAN_CODE_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG ( @@ -2559,7 +2559,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG DROP TABLE IF EXISTS TMGSERVICES_KEY_GENERATOR; DROP TABLE IF EXISTS staging_TMGSERVICES_KEY_GENERATOR; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TMGSERVICES_KEY_GENERATOR +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_KEY_GENERATOR USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR ( @@ -2584,7 +2584,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR DROP TABLE IF EXISTS TMGSERVICES_GROUP_CODE_CONFIG; DROP TABLE IF EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TMGSERVICES_GROUP_CODE_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TMGSERVICES_GROUP_CODE_CONFIG ( @@ -2610,7 +2610,7 @@ CREATE EXTERNAL IF NOT EXISTS TABLE 
staging_TMGSERVICES_GROUP_CODE_CONFIG DROP TABLE IF EXISTS TMGSERVICES_DNIS_CONFIG; DROP TABLE IF EXISTS staging_TMGSERVICES_DNIS_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TMGSERVICES_DNIS_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DNIS_CONFIG USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG ( @@ -2636,7 +2636,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG DROP TABLE IF EXISTS TMGSERVICES_DENIAL_REASON_CONFIG; DROP TABLE IF EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TMGSERVICES_DENIAL_REASON_CONFIG +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG ( @@ -2660,7 +2660,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG DROP TABLE IF EXISTS TMGSERVICES_CLIENT_MASTER; DROP TABLE IF EXISTS staging_TMGSERVICES_CLIENT_MASTER; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TMGSERVICES_CLIENT_MASTER +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_CLIENT_MASTER USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER ( @@ -2685,7 +2685,7 @@ CREATE TABLE IF 
NOT EXISTS TMGSERVICES_CLIENT_MASTER DROP TABLE IF EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; DROP TABLE IF EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS ( @@ -2714,7 +2714,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS DROP TABLE IF EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; DROP TABLE IF EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS ( @@ -2739,7 +2739,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS DROP TABLE IF EXISTS TMGSERVICES_ACCOUNTING_CODES; DROP TABLE IF EXISTS staging_TMGSERVICES_ACCOUNTING_CODES; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_TMGSERVICES_ACCOUNTING_CODES +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_ACCOUNTING_CODES USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES ( @@ -2769,7 +2769,7 @@ CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES DROP TABLE IF EXISTS UNAPPLIED_CASH; -DROP TABLE IF EXISTS staging_UNAPPLIED_CASHE; +DROP TABLE IF EXISTS staging_UNAPPLIED_CASH; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_UNAPPLIED_CASH +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UNAPPLIED_CASH USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UNAPPLIED_CASH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS UNAPPLIED_CASH( UNAPP_CSH_ID BIGINT NOT NULL, @@ -2792,7 +2792,7 @@ CREATE TABLE IF NOT EXISTS UNAPPLIED_CASH( DROP TABLE IF EXISTS WORK_GENERATED_KEYS; DROP TABLE IF EXISTS staging_WORK_GENERATED_KEYS; ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- -CREATE EXTERNAL IF NOT EXISTS TABLE staging_WORK_GENERATED_KEYS +CREATE EXTERNAL TABLE IF NOT EXISTS staging_WORK_GENERATED_KEYS USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/WORK_GENERATED_KEYS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); CREATE TABLE IF NOT EXISTS WORK_GENERATED_KEYS( CLIENT_ID BIGINT NOT NULL, diff --git a/dtests/src/resources/scripts/cdcConnector/createCDCRowTables.sql b/dtests/src/resources/scripts/cdcConnector/createCDCRowTables.sql new file mode 100644 index 0000000000..584d1f2381 --- /dev/null +++ b/dtests/src/resources/scripts/cdcConnector/createCDCRowTables.sql @@ -0,0 +1,2446 @@ +CREATE EXTERNAL TABLE IF NOT EXISTS staging_INQUIRY + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INQUIRY.dat', header 'true', inferSchema 'true', nullValue 'NULL', 
maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS INQUIRY( INQ_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + CALLER_TYP_REF_ID BIGINT NOT NULL, + IS_IN_BOUND INT, + TRACK_NUM VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,INQ_ID)) +USING row OPTIONS(partition_by 'INQ_ID', buckets '32',redundancy '1'); +INSERT INTO INQUIRY SELECT * FROM staging_INQUIRY; + +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATIONS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ORGANIZATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS ORGANIZATIONS(ORG_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + ORG_NM VARCHAR(8), + ORG_TYP_REF_ID BIGINT NOT NULL, + DESCR VARCHAR(15), + EMPR_TAX_ID VARCHAR(6), + WEB_SITE VARCHAR(10), + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,ORG_ID)) +USING row OPTIONS(partition_by 'ORG_ID', buckets '32',redundancy '1'); +INSERT INTO ORGANIZATIONS SELECT * FROM staging_ORGANIZATIONS; + +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_REVIEW + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS UM_REVIEW(UM_RVW_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + UM_RVW_CD VARCHAR(15), + PRSN_ID BIGINT, + BENE_GRP_ID BIGINT, + SEC_BENE_GRP_ID BIGINT, + SEC_BENE_GRP_ID_1 BIGINT, + CRT_MTHD VARCHAR(15), + USR_ID VARCHAR(15), + CRT_DT DATE, + PRI_DIAG_CD_ID BIGINT, + PRI_RVWER VARCHAR(15), + SEC_RVWER VARCHAR(15), + ACDT_IND VARCHAR(15), + ACDT_DT DATE, + ACDT_ST VARCHAR(15), 
+ OTH_BENE_IND VARCHAR(15), + RISK_IND_REF_ID BIGINT, + QLTY_IND_REF_ID BIGINT, + MBR_COMPL_REF_ID BIGINT, + SERV_TO_DT DATE, + DEATH_DT DATE, + UM_RVW_LNK_ID BIGINT, + UM_LNK_RSN_REF_ID BIGINT, + INQ_ID BIGINT, + INPT_IND VARCHAR(15), + EDI_278_RSPNS_DT DATE, + RVW_PRVD_ID BIGINT, + ALT_UM_RVW_ID VARCHAR(15), + ICD_VER_PROC VARCHAR(15), + ICD_VER_INP VARCHAR(15), + ICD_TRNSLTN_TRN_ID VARCHAR(15), + SBMT_DIAG_CD_ID BIGINT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + OPRN VARCHAR(15), + PRIMARY KEY (CLIENT_ID,UM_RVW_ID)) + USING row OPTIONS(partition_by 'UM_RVW_ID', buckets '32',redundancy '1'); +INSERT INTO UM_REVIEW SELECT * FROM staging_UM_REVIEW; + + + ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE_DETAIL + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INVOICE_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS INVOICE_DETAIL( + INV_ID BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + INV_DTL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + ENT_SCHD_STRT_DT DATE, + ENT_SCHD_END_DT DATE, + BNFC_ID BIGINT, + WHLD_TYP_REF_ID BIGINT, + DTL_TYP_REF_ID BIGINT, + BNFC_RELN_REF_ID BIGINT, + INV_DTL_AMT NUMERIC(38,8), + CUR_FLG INT, + CR_FLG INT, + BENE_GRP_ID BIGINT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,INV_ID,INV_DTL_ID) + ) + USING row OPTIONS(partition_by 'INV_ID', buckets '32',redundancy '1'); +INSERT INTO INVOICE_DETAIL SELECT * FROM staging_INVOICE_DETAIL; + +---- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_GROUP_NAME + USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_GROUP_NAME.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS BENEFIT_GROUP_NAME( + GRP_ID BIGINT NOT NULL, + BENE_GRP_ID BIGINT NOT NULL, + BENE_GRP_NM_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + BENE_GRP_NM VARCHAR(8), + DESCR VARCHAR(15), + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,GRP_ID,BENE_GRP_ID,BENE_GRP_NM_ID) + ) + USING row OPTIONS(partition_by 'GRP_ID', buckets '32',redundancy '1'); + INSERT INTO BENEFIT_GROUP_NAME SELECT * FROM staging_BENEFIT_GROUP_NAME; + +---- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_GROUPS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_GROUPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS BENEFIT_GROUPS( + GRP_ID BIGINT NOT NULL, + BENE_PKG_ID BIGINT NOT NULL, + BENE_GRP_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + BENE_GRP_CD VARCHAR(5), + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,GRP_ID,BENE_PKG_ID,BENE_GRP_ID) + ) + USING row OPTIONS(partition_by 'GRP_ID', buckets '32',redundancy '1'); + INSERT INTO BENEFIT_GROUPS SELECT * FROM staging_BENEFIT_GROUPS; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_RECONCILIATION + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_RECONCILIATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE 
TABLE IF NOT EXISTS BILLING_RECONCILIATION( + BILL_ENT_ID BIGINT NOT NULL, + BILL_RECON_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + CR_TYP_REF_ID BIGINT NOT NULL, + CR_SRC_ID BIGINT NOT NULL, + DR_TYP_REF_ID BIGINT NOT NULL, + DR_SRC_ID BIGINT NOT NULL, + AMT NUMERIC(19,4), + CNCL_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BILL_ENT_ID,BILL_RECON_ID) + ) + USING row OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',redundancy '1'); + INSERT INTO BILLING_RECONCILIATION SELECT * FROM staging_BILLING_RECONCILIATION; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHECKS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CHECKS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS CHECKS( + CLM_PAY_ID BIGINT NOT NULL, + CHK_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + CHK_SEQ_NUM INTEGER, + CHK_NUM VARCHAR(15), + PAYEE_ID BIGINT, + CASHED_DT DATE, + REISSUED_DT DATE, + PRNT_DT VARCHAR(15), + CHK_TYP VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,CLM_PAY_ID,CHK_ID) + ) + USING row OPTIONS(partition_by 'CLM_PAY_ID', buckets '32',redundancy '1'); + INSERT INTO CHECKS SELECT * FROM staging_CHECKS; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLIENT_REFERENCE_DATA + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CLIENT_REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS CLIENT_REFERENCE_DATA( + CLIENT_REF_DTA_ID BIGINT NOT NULL, + VER BIGINT 
NOT NULL, + CLIENT_ID BIGINT NOT NULL, + CMN_REF_DTA_ID BIGINT, + REF_ENT VARCHAR(15), + REF_DMN VARCHAR(15), + REF_CD VARCHAR(15), + REF_DESCR VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,CLIENT_REF_DTA_ID) + ) + USING row OPTIONS(partition_by 'CLIENT_REF_DTA_ID', buckets '32',redundancy '1' ); + INSERT INTO CLIENT_REFERENCE_DATA SELECT * FROM staging_CLIENT_REFERENCE_DATA; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_COB_CLAIM_DIAGNOSIS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/COB_CLAIM_DIAGNOSIS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS COB_CLAIM_DIAGNOSIS( + PRSN_ID BIGINT NOT NULL, + PRSN_COB_ID BIGINT NOT NULL, + REC_ORD INTEGER NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + DIAG_IND CHAR(1), + CLM_DIAG_CD VARCHAR(10), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_COB_ID,REC_ORD) + ) + USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO COB_CLAIM_DIAGNOSIS SELECT * FROM staging_COB_CLAIM_DIAGNOSIS; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_COB_ORGANIZATION_PERSON + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/COB_ORGANIZATION_PERSON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS COB_ORGANIZATION_PERSON( + PRSN_ID BIGINT NOT NULL, + PRSN_COB_ID BIGINT NOT NULL, + ORG_PRSN_TYP_REF_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + ORG_NM VARCHAR(8), + FIRST_NM VARCHAR(5), + MID_NM 
VARCHAR(5), + LAST_NM VARCHAR(5), + EMPR_TAX_ID VARCHAR(6), + SSN VARCHAR(12), + STR_LN1 VARCHAR(10), + STR_LN2 VARCHAR(10), + CTY VARCHAR(7), + ST VARCHAR(5), + PSTL_CD VARCHAR(2), + PHN_NUM VARCHAR(5), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_COB_ID,ORG_PRSN_TYP_REF_ID) + ) + USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); +INSERT INTO COB_ORGANIZATION_PERSON SELECT * FROM staging_COB_ORGANIZATION_PERSON; + + +---- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_DIAGNOSIS_CODE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/DIAGNOSIS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS DIAGNOSIS_CODE( + DIAG_CD_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + DIAG_CD VARCHAR(15), + DIAG_DESCR VARCHAR(15), + RLTD_DIAG_CD VARCHAR(15), + STD_REF_CD_1 VARCHAR(15), + STD_REF_CD_2 VARCHAR(15), + DIAG_TYP VARCHAR(15), + EXPR_DT DATE, + EFF_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,DIAG_CD_ID) + ) + USING row OPTIONS(partition_by 'DIAG_CD_ID', buckets '32',redundancy '1'); + INSERT INTO DIAGNOSIS_CODE SELECT * FROM staging_DIAGNOSIS_CODE; + +---- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ELECTRONIC_ADDRESS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ELECTRONIC_ADDRESS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS ELECTRONIC_ADDRESS( + CNTC_ID BIGINT NOT NULL, + ELEC_ADDR_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + 
ELEC_ADDR VARCHAR(10), + ADDR_FMT VARCHAR(5), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,CNTC_ID,ELEC_ADDR_ID) + ) + USING row OPTIONS(partition_by 'CNTC_ID',buckets '32',redundancy '1'); + INSERT INTO ELECTRONIC_ADDRESS SELECT * FROM staging_ELECTRONIC_ADDRESS; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ENTITY_BANK_ACCOUNT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ENTITY_BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS ENTITY_BANK_ACCOUNT( + bill_ent_id bigint NOT NULL, + bnk_acct_id bigint NOT NULL, + ent_bnk_acct_id bigint NOT NULL, + ver bigint NOT NULL, + client_id bigint NOT NULL, + eff_dt DATE, + expr_dt DATE, + vld_frm_dt DATE NOT NULL, + vld_to_dt DATE, + src_sys_ref_id varchar(10) NOT NULL, + src_sys_rec_id varchar(150), + PRIMARY KEY (client_id,bill_ent_id,bnk_acct_id,ent_bnk_acct_id) + ) USING row OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',redundancy '1'); + INSERT INTO ENTITY_BANK_ACCOUNT SELECT * FROM staging_ENTITY_BANK_ACCOUNT; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ETL_METADATA + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ETL_METADATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS ETL_METADATA( + MAIN_TBL VARCHAR(15) NOT NULL, + TGT_TBL VARCHAR(15) NOT NULL, + STMT_ID INTEGER NOT NULL, + COL_ORD_NUM INTEGER NOT NULL, + SRC_COL VARCHAR(15), + TRNSFRMTN_TYP VARCHAR(15) NOT NULL, + SRC_TBL VARCHAR(15), + TGT_COL VARCHAR(15), + REF_VAL_1 VARCHAR(15), + REF_VAL_2 VARCHAR(15), + SRC_REF_TYP VARCHAR(15), + NULL_EMPTY_FLTR VARCHAR(15), + SRC_KEY_TYP 
VARCHAR(15), + COL_FLTR VARCHAR(15), + DISTINCT_FLG VARCHAR(15), + PRIMARY KEY (MAIN_TBL,TGT_TBL,STMT_ID,COL_ORD_NUM) + ) USING row OPTIONS(partition_by 'COL_ORD_NUM', buckets '32',redundancy '1'); + INSERT INTO ETL_METADATA SELECT * FROM staging_ETL_METADATA; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS EXHIBIT( + EXHIBIT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + EXHIBIT_CD_REF_ID BIGINT NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,EXHIBIT_ID) + ) USING row OPTIONS(partition_by 'EXHIBIT_ID', buckets '32',redundancy '1'); + INSERT INTO EXHIBIT SELECT * FROM staging_EXHIBIT; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXPLANATION_CODE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXPLANATION_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS EXPLANATION_CODE( + EXPLAIN_CD_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + EXPLAIN_TYP VARCHAR(15), + EXPLAIN_STAT VARCHAR(15), + SD VARCHAR(15), + LD_1 VARCHAR(15), + LD_2 VARCHAR(15), + EXPLAIN_LBLTY_IND VARCHAR(15), + EDI_CLM_STAT_CAT VARCHAR(15), + EDI_CLM_STAT VARCHAR(15), + EDI_CLM_ADJ_RSN VARCHAR(15), + REM_RMRK VARCHAR(15), + PRVD_ADJ_RSN VARCHAR(15), + HC_PLCY_ID VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,EXPLAIN_CD_ID) + ) USING row OPTIONS(partition_by 
'EXPLAIN_CD_ID', buckets '32',redundancy '1'); + INSERT INTO EXPLANATION_CODE SELECT * FROM staging_EXPLANATION_CODE; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_FILE_TRANSFER_RUN + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FILE_TRANSFER_RUN.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS FILE_TRANSFER_RUN( + FL_TRANS_RUN_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + LOG_STMP VARCHAR(15), + TSK_ID BIGINT, + NODE INTEGER, + NMNL_STRT VARCHAR(15), + TSK_NM VARCHAR(15), + RUN_REC_TYP VARCHAR(15), + STRT_TM VARCHAR(15), + END_TM VARCHAR(15), + STARTED_BY VARCHAR(15), + SCSS_FLG VARCHAR(15), + FL_SNT_CNT INTEGER, + BYTE_SNT NUMERIC(38,8), + HAS_BEEN_RD INTEGER, + LAST_ERR_TYP INTEGER, + LAST_ERR_DESCR VARCHAR(15), + HASH VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + OPRN VARCHAR(15), + PRIMARY KEY (CLIENT_ID,FL_TRANS_RUN_ID) + ) USING row OPTIONS(partition_by 'FL_TRANS_RUN_ID', buckets '32',redundancy '1'); + INSERT INTO FILE_TRANSFER_RUN SELECT * FROM staging_FILE_TRANSFER_RUN; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_FILE_TRANSFER_STAT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FILE_TRANSFER_STAT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS FILE_TRANSFER_STAT( + FL_TRANS_STAT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + LOG_STMP VARCHAR(15), + TSK_ID BIGINT, + NODE INTEGER, + NMNL_STRT VARCHAR(15), + ACT VARCHAR(15), + SRC_HOST VARCHAR(15), + SRC_FL_FULL_NM VARCHAR(15), + SRC_FL_NM VARCHAR(15), + SRC_FL_NM_ID VARCHAR(15), + SRC_STMP 
VARCHAR(15), + SRC_BYTE_SNT NUMERIC(38,8), + SRC_DUR NUMERIC(38,8), + TGT_HOST VARCHAR(15), + TGT_FL_FULL_NM VARCHAR(15), + TGT_FL_NM VARCHAR(15), + TGT_FL_NM_ID VARCHAR(15), + TGT_BYTE_RECV NUMERIC(38,8), + TGT_DUR NUMERIC(38,8), + ERR_TYP INTEGER, + DESCR VARCHAR(15), + HASH VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + OPRN VARCHAR(15), + PRIMARY KEY (CLIENT_ID,FL_TRANS_STAT_ID) + ) USING row OPTIONS(partition_by 'FL_TRANS_STAT_ID', buckets '32',redundancy '1'); + INSERT INTO FILE_TRANSFER_STAT SELECT * FROM staging_FILE_TRANSFER_STAT; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_GENERAL_LEDGER + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/GENERAL_LEDGER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS GENERAL_LEDGER( + BILL_ENT_ID BIGINT NOT NULL, + GL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SRC_TYP_REF_ID BIGINT, + BILL_SRC_ID BIGINT, + GL_MAP_ID BIGINT NOT NULL, + DR_AMT NUMERIC(18,4) NOT NULL, + CR_AMT NUMERIC(18,4) NOT NULL, + POST_DT DATE, + CRTD_DT DATE, + JE_NUM VARCHAR(5), + BENE_PKG_ID BIGINT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BILL_ENT_ID,GL_ID) + ) USING row OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',redundancy '1'); + INSERT INTO GENERAL_LEDGER SELECT * FROM staging_GENERAL_LEDGER; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_GROUP_RELATION + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/GROUP_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS 
GROUP_RELATION( + GRP_ID BIGINT NOT NULL, + GRP_RELN_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + GRP_RELN_TYP_REF_ID BIGINT NOT NULL, + RLTD_GRP_ID BIGINT NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,GRP_ID,GRP_RELN_ID) + ) USING row OPTIONS(partition_by 'GRP_ID',buckets '32',redundancy '1'); + INSERT INTO GROUP_RELATION SELECT * FROM staging_GROUP_RELATION; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LEP_APPEAL + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LEP_APPEAL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS LEP_APPEAL( + PRSN_ID BIGINT NOT NULL, + LEP_APL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + EXT_APL_ID VARCHAR(5) NOT NULL, + APL_DT DATE, + RCV_DT DATE, + APL_DCN VARCHAR(5), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,LEP_APL_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1'); + INSERT INTO LEP_APPEAL SELECT * FROM staging_LEP_APPEAL; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS LETTER( + PRSN_ID BIGINT NOT NULL, + LTR_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,LTR_ID) + ) USING row OPTIONS(partition_by 
'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO LETTER SELECT * FROM staging_LETTER; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LINE_DISALLOW_EXPLANATION + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LINE_DISALLOW_EXPLANATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS LINE_DISALLOW_EXPLANATION( + PRSN_ID BIGINT NOT NULL, + CLM_ID BIGINT NOT NULL, + SEQ_NUM INTEGER NOT NULL, + LN_DISALLOW_EXPLAIN_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + DISALLOW_AMT NUMERIC(18,4), + EXPLAIN_CD_ID BIGINT, + DISALLOW_EXPLAIN_CD VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,LN_DISALLOW_EXPLAIN_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1'); + INSERT INTO LINE_DISALLOW_EXPLANATION SELECT * FROM staging_LINE_DISALLOW_EXPLANATION; + +---- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LINE_PROCEDURE_MODIFIER + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LINE_PROCEDURE_MODIFIER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS LINE_PROCEDURE_MODIFIER( + PRSN_ID BIGINT NOT NULL, + CLM_ID BIGINT NOT NULL, + SEQ_NUM INTEGER NOT NULL, + LN_PR_MOD_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + MOD_SEQ_NUM INTEGER, + PR_MOD_CD VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,CLM_ID,SEQ_NUM,LN_PR_MOD_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO 
LINE_PROCEDURE_MODIFIER SELECT * FROM staging_LINE_PROCEDURE_MODIFIER; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_MARX_CALENDAR + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MARX_CALENDAR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS MARX_CALENDAR( + MARX_CAL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + PAY_DT DATE, + PLAN_DTA_DUE_DT DATE, + MO_RPT_RLS_DT DATE, + CERT_OF_ENRL_DT DATE, + STRT_DT DATE, + END_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (MARX_CAL_ID) + ) USING row OPTIONS(partition_by 'MARX_CAL_ID', buckets '32',redundancy '1'); + INSERT INTO MARX_CALENDAR SELECT * FROM staging_MARX_CALENDAR; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_NOTE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/NOTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS NOTE( + INQ_ID BIGINT NOT NULL, + SRC_ID BIGINT NOT NULL, + SRC_TYP_REF_ID BIGINT NOT NULL, + NTE_ORD INTEGER NOT NULL, + NTE_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + NTE_TYP_REF_ID BIGINT, + WRK_BASKET VARCHAR(5), + NTE VARCHAR(15), + NTE_TS date, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,INQ_ID,SRC_ID,SRC_TYP_REF_ID,NTE_ORD,NTE_ID) + ) USING row OPTIONS(partition_by 'INQ_ID',buckets '32',redundancy '1'); + INSERT INTO NOTE SELECT * FROM staging_NOTE; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATION_CONTACT_TYPE + USING 
com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ORGANIZATION_CONTACT_TYPE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS ORGANIZATION_CONTACT_TYPE( + ORG_ID BIGINT NOT NULL, + CNTC_TYP_REF_ID BIGINT NOT NULL, + ORG_CNTC_TYP_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,ORG_ID,CNTC_TYP_REF_ID,ORG_CNTC_TYP_ID) + ) USING row OPTIONS(partition_by 'ORG_ID', buckets '32',redundancy '1'); + INSERT INTO ORGANIZATION_CONTACT_TYPE SELECT * FROM staging_ORGANIZATION_CONTACT_TYPE; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PAYMENT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PAYMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PAYMENT( + BILL_ENT_ID BIGINT NOT NULL, + PAY_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + PAY_AMT NUMERIC(18,4) NOT NULL, + CHK_NUM VARCHAR(15), + PAY_SNT_DT DATE, + PAY_SRC_REF_ID BIGINT NOT NULL, + REFUN_RSN_REF_ID BIGINT NOT NULL, + ENT_BNK_ACCT_ID BIGINT, + CNTC_ID BIGINT, + CNCL_DT DATE, + SRC_TYP_REF_ID BIGINT, + BILL_SRC_ID BIGINT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BILL_ENT_ID,PAY_ID) + ) USING row OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',redundancy '1'); + INSERT INTO PAYMENT SELECT * FROM staging_PAYMENT; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_COB + USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_COB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_COB( + PRSN_ID BIGINT NOT NULL, + PRSN_COB_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + ACT_TYP_REF_ID BIGINT, + BIRTH_DT DATE, + GNDR_CD CHAR(1), + SSN VARCHAR(12), + INFORMANT_RELN_REF_ID BIGINT, + RCPT_METHOD_REF_ID BIGINT, + REC_TYP VARCHAR(5) NOT NULL, + RX_ID VARCHAR(2), + RX_GRP VARCHAR(15), + RX_BIN VARCHAR(15), + RX_PCN VARCHAR(15), + RX_PLAN_PHN VARCHAR(10), + COB_SRC VARCHAR(5), + SEQ_NUM VARCHAR(3), + MSP_RSN_CD CHAR(1), + COVRG_CD CHAR(1), + IND_PLCY_NUM VARCHAR(17), + GRP_PLCY_NUM VARCHAR(2), + EFF_DT DATE, + EXPR_DT DATE, + RELSHP_CD VARCHAR(2), + PAYR_ID VARCHAR(10), + PRSN_CD VARCHAR(3), + PAYR_ORD VARCHAR(3), + EMP_INFO_CD CHAR(1), + LEAD_CONTRC VARCHAR(9), + CLS_ACT_TYP VARCHAR(2), + WCSA_AMT NUMERIC(12,2), + WCSA_IND VARCHAR(2), + WCMSA_STLMNT_DT DATE, + TOT_RX_STLMNT_AMT NUMERIC(12,2), + IS_RX_AMT_INCL CHAR(1), + TERM_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_COB_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO PERSON_COB SELECT * FROM staging_PERSON_COB; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_CODE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_CODE( + PRSN_ID BIGINT NOT NULL, + CD_VAL_ID BIGINT NOT NULL, + PRSN_CD_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + 
SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,CD_VAL_ID,PRSN_CD_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO PERSON_CODE SELECT * FROM staging_PERSON_CODE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_EVENT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_LEP_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_LEP_EVENT( + PRSN_ID BIGINT NOT NULL, + PRSN_LEP_EVNT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + LEP_AMT NUMERIC(14,2), + TOT_UNCOVERED_MOS INTEGER, + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_LEP_EVNT_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO PERSON_LEP_EVENT SELECT * FROM staging_PERSON_LEP_EVENT; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_PROFILE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_LEP_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE( + PRSN_ID BIGINT NOT NULL, + PRSN_LEP_PRFL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_LEP_PRFL_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO PERSON_LEP_PROFILE SELECT * FROM staging_PERSON_LEP_PROFILE; + + +----- CREATE TEMPORARY 
STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ORGANIZATION_RELATION + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_ORGANIZATION_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_ORGANIZATION_RELATION( + PRSN_ID BIGINT NOT NULL, + ORG_ID BIGINT NOT NULL, + PRSN_ORG_RELN_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + RELN_TYP_REF_ID BIGINT NOT NULL, + EFF_DT DATE, + EXPR_DT DATE, + SRC_SYS_REC_ID VARCHAR(15), + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + PRIMARY KEY (CLIENT_ID,PRSN_ID,ORG_ID,PRSN_ORG_RELN_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1'); + INSERT INTO PERSON_ORGANIZATION_RELATION SELECT * FROM staging_PERSON_ORGANIZATION_RELATION; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_PAYMENT_OPTIONS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_PAYMENT_OPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_PAYMENT_OPTIONS( + PRSN_ID BIGINT NOT NULL, + PRSN_PAY_OPTN_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + PAY_OPTN_REF_ID BIGINT NOT NULL, + WHLD_TYP_REF_ID BIGINT, + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_PAY_OPTN_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO PERSON_PAYMENT_OPTIONS SELECT * FROM staging_PERSON_PAYMENT_OPTIONS; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS 
staging_PERSON_RELATION + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_RELATION( + PRSN_ID BIGINT NOT NULL, + RLTD_PRSN_ID BIGINT NOT NULL, + PRSN_RELN_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + RELN_TYP_REF_ID BIGINT NOT NULL, + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,RLTD_PRSN_ID,PRSN_RELN_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1'); + INSERT INTO PERSON_RELATION SELECT * FROM staging_PERSON_RELATION; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ROLE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_ROLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_ROLE( + PRSN_ID BIGINT NOT NULL, + PRSN_TYP_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + PRSN_TYP_REF_ID BIGINT NOT NULL, + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_TYP_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO PERSON_ROLE SELECT * FROM staging_PERSON_ROLE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_SUBSIDY_PROFILE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_SUBSIDY_PROFILE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE 
TABLE IF NOT EXISTS PERSON_SUBSIDY_PROFILE( + PRSN_ID BIGINT NOT NULL, + PRSN_SBSDY_PRFL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SBSDY_TYP_REF_ID BIGINT NOT NULL, + SBSDY_LEVEL_REF_ID BIGINT, + COPAY_CAT_REF_ID BIGINT, + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_SBSDY_PRFL_ID) +) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO PERSON_SUBSIDY_PROFILE SELECT * FROM staging_PERSON_SUBSIDY_PROFILE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_WORK_ITEM + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_WORK_ITEM( + PRSN_ID BIGINT NOT NULL, + PRSN_WRK_ITM_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + WRK_ITYP_REF_ID BIGINT NOT NULL, + ADD_DT DATE, + PROC_STAT_REF_ID BIGINT, + PROC_STAT_DT DATE, + PRSN_EVNT_ID BIGINT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_WRK_ITM_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO PERSON_WORK_ITEM SELECT * FROM staging_PERSON_WORK_ITEM; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PHONE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PHONE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PHONE( + CNTC_ID BIGINT NOT NULL, + PHN_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + 
AREA_CD VARCHAR(3), + LOC_EXCH VARCHAR(10), + PHN_NUM VARCHAR(5), + PHN_EXT VARCHAR(10), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,CNTC_ID,PHN_ID) + ) USING row OPTIONS(partition_by 'CNTC_ID',buckets '32',redundancy '1'); + INSERT INTO PHONE SELECT * FROM staging_PHONE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PLAN_MEMBERSHIPS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PLAN_MEMBERSHIPS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PLAN_MEMBERSHIPS( + PRSN_ID BIGINT NOT NULL, + BENE_GRP_ID BIGINT NOT NULL, + PLAN_MBRSHP_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + EFF_DT DATE, + EXPR_DT DATE, + IS_BLBL INT, + IS_ELIG INT, + ELIG_RSN_REF_ID BIGINT, + SEC_BENE_GRP BIGINT, + CALC_TS date, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + OPRN VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,BENE_GRP_ID,PLAN_MBRSHP_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1'); + INSERT INTO PLAN_MEMBERSHIPS SELECT * FROM staging_PLAN_MEMBERSHIPS; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_POS_CODE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/POS_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS POS_CODE( + POS_CD_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + POS_CD VARCHAR(15), + POS_DESCR VARCHAR(15), + IO_IND VARCHAR(15), + SERV_CAT_REF_ID BIGINT, + SERV_GRP_REF_ID BIGINT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID 
VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,POS_CD_ID) + ) USING row OPTIONS(partition_by 'POS_CD_ID', buckets '32',redundancy '1'); + INSERT INTO POS_CODE SELECT * FROM staging_POS_CODE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PREMIUM( + GRP_ID BIGINT NOT NULL, + PREM_CAT_ID BIGINT NOT NULL, + PREM_REC_ORD INTEGER NOT NULL, + PREM_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + EFF_DT DATE NOT NULL, + EXPR_DT DATE, + PREM_AMT_RT NUMERIC(12,4), + PREM_RATIO_FCTR NUMERIC(12,4), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,GRP_ID,PREM_CAT_ID,PREM_REC_ORD,PREM_ID) + ) USING row OPTIONS(partition_by 'GRP_ID', buckets '32',redundancy '1'); + INSERT INTO PREMIUM SELECT * FROM staging_PREMIUM; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_REFERENCE_DATA + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/REFERENCE_DATA.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS REFERENCE_DATA( + REF_DTA_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + REF_DMN VARCHAR(5) NOT NULL, + REF_CD VARCHAR(30), + DESCR VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + PRIMARY KEY (REF_DTA_ID,REF_DMN) + ) USING row OPTIONS(partition_by 'REF_DTA_ID', buckets '32',redundancy '1'); + INSERT INTO REFERENCE_DATA SELECT * FROM staging_REFERENCE_DATA; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT 
EXISTS staging_RETURNED_MAIL + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/RETURNED_MAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS RETURNED_MAIL( + RTRN_MAIL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + PRSN_ID BIGINT, + RECV_DT DATE, + DOC_NUM VARCHAR(5), + RTRN_MAIL_TYP_CD VARCHAR(10), + RTRN_RSN VARCHAR(10), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,RTRN_MAIL_ID) + ) USING row OPTIONS(partition_by 'RTRN_MAIL_ID', buckets '32',redundancy '1'); + INSERT INTO RETURNED_MAIL SELECT * FROM staging_RETURNED_MAIL; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_ACTIVITY + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_ACTIVITY.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS UM_ACTIVITY( + UM_RVW_ID BIGINT NOT NULL, + UM_ACTY_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SEQ_NUM INTEGER, + USR_ID VARCHAR(15), + CRT_DT DATE, + ACTY_RSN_REF_ID BIGINT, + CMPLX_IND_REF_ID BIGINT, + UM_SERV_ADD INTEGER, + UM_INPT_ADD INTEGER, + UM_INPT_RVW_ADD INTEGER, + UM_NTE_ADD INTEGER, + RTE_USR_ID VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + OPRN VARCHAR(15), + PRIMARY KEY (CLIENT_ID,UM_RVW_ID,UM_ACTY_ID) + ) USING row OPTIONS(partition_by 'UM_RVW_ID', buckets '32',redundancy '1'); + INSERT INTO UM_ACTIVITY SELECT * FROM staging_UM_ACTIVITY; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_DIAGNOSIS_LINE + USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_DIAGNOSIS_LINE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_LINE( + UM_RVW_ID BIGINT NOT NULL, + UM_DIAG_SET_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SEQ_NUM INTEGER, + DIAG_CD_ID BIGINT, + EFF_DT DATE, + SBMT_DIAG_CD_ID BIGINT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + OPRN VARCHAR(15), + PRIMARY KEY (CLIENT_ID,UM_RVW_ID,UM_DIAG_SET_ID) + ) USING row OPTIONS(partition_by 'UM_RVW_ID', buckets '32',redundancy '1'); + INSERT INTO UM_DIAGNOSIS_LINE SELECT * FROM staging_UM_DIAGNOSIS_LINE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_DIAGNOSIS_SET + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_DIAGNOSIS_SET.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS UM_DIAGNOSIS_SET( + UM_RVW_ID BIGINT NOT NULL, + UM_DIAG_SET_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + CRT_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + OPRN VARCHAR(15), + PRIMARY KEY (CLIENT_ID,UM_RVW_ID,UM_DIAG_SET_ID) + ) USING row OPTIONS(partition_by 'UM_RVW_ID', buckets '32',redundancy '1'); + INSERT INTO UM_DIAGNOSIS_SET SELECT * FROM staging_UM_DIAGNOSIS_SET; + + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_PACKAGE_RELATION + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_PACKAGE_RELATION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_RELATION( + 
BENE_PKG_ID BIGINT NOT NULL, + PKG_RELN_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + PKG_RELN_TYP_REF_ID BIGINT NOT NULL, + RLTD_BENE_PKG_ID BIGINT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BENE_PKG_ID,PKG_RELN_ID) + ) USING row OPTIONS(partition_by 'BENE_PKG_ID', buckets '32',redundancy '1'); + INSERT INTO BENEFIT_PACKAGE_RELATION SELECT * FROM staging_BENEFIT_PACKAGE_RELATION; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_CONTACT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_ENTITY_CONTACT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS BILLING_ENTITY_CONTACT( + BILL_ENT_ID BIGINT NOT NULL, + CNTC_ID BIGINT NOT NULL, + BILL_ENT_CNTC_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + CNTC_TYP_REF_ID BIGINT NOT NULL, + EFF_DT DATE, + EXPR_DT DATE, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + PRIMARY KEY (CLIENT_ID,BILL_ENT_ID,CNTC_ID,BILL_ENT_CNTC_ID) + ) USING row OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',redundancy '1'); + INSERT INTO BILLING_ENTITY_CONTACT SELECT * FROM staging_BILLING_ENTITY_CONTACT; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_DETAIL + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_ENTITY_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS BILLING_ENTITY_DETAIL( + BILL_ENT_ID BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + FIN_RESP_REF_ID BIGINT NOT NULL, + GRP_ID BIGINT NOT 
NULL, + IS_GRP_STMT INT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BILL_ENT_ID) + ) USING row OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',redundancy '1'); + INSERT INTO BILLING_ENTITY_DETAIL SELECT * FROM staging_BILLING_ENTITY_DETAIL; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHECK_STATUS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CHECK_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS CHECK_STATUS( + CLM_PAY_ID BIGINT NOT NULL, + CHK_ID BIGINT NOT NULL, + CHK_STAT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + STAT_SEQ_NUM INTEGER, + CHK_STAT VARCHAR(15), + CHK_STAT_RSN VARCHAR(15), + STAT_DT DATE, + USR_ID VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,CLM_PAY_ID,CHK_ID,CHK_STAT_ID) + ) USING row OPTIONS(partition_by 'CLM_PAY_ID',buckets '32',redundancy '1'); + INSERT INTO CHECK_STATUS SELECT * FROM staging_CHECK_STATUS; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT_GROUP + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXHIBIT_GROUP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP( + EXHIBIT_GRP_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + BENE_GRP_ID BIGINT, + TRIGGERING_EXHIBIT_ID BIGINT NOT NULL, + LANG_REF_ID BIGINT, + SUPPRESSED_FLG INT, + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY 
(CLIENT_ID,EXHIBIT_GRP_ID) + ) USING row OPTIONS(partition_by 'EXHIBIT_GRP_ID', buckets '32',redundancy '1' ); + INSERT INTO EXHIBIT_GROUP SELECT * FROM staging_EXHIBIT_GROUP; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_EXHIBIT_GROUP_EXHIBIT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/EXHIBIT_GROUP_EXHIBIT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS EXHIBIT_GROUP_EXHIBIT( + EXHIBIT_ID BIGINT NOT NULL, + EXHIBIT_GRP_ID BIGINT NOT NULL, + EXHIBIT_GRP_EXHIBIT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + CMPNT_NUM VARCHAR(10), + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,EXHIBIT_ID,EXHIBIT_GRP_ID,EXHIBIT_GRP_EXHIBIT_ID) + ) USING row OPTIONS(partition_by 'EXHIBIT_ID', buckets '32',redundancy '1'); + INSERT INTO EXHIBIT_GROUP_EXHIBIT SELECT * FROM staging_EXHIBIT_GROUP_EXHIBIT; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_GENERAL_LEDGER_MAP + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/GENERAL_LEDGER_MAP.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS GENERAL_LEDGER_MAP( + GL_MAP_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CR_ACCT_REF_ID BIGINT NOT NULL, + DR_ACCT_REF_ID BIGINT NOT NULL, + GL_MAP_TYP_REF_ID BIGINT NOT NULL, + GL_MAP_NM VARCHAR(5), + DESCR VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (GL_MAP_ID) + ) USING row OPTIONS(partition_by 'GL_MAP_ID', buckets '32',redundancy '1'); + INSERT INTO GENERAL_LEDGER_MAP SELECT * FROM 
staging_GENERAL_LEDGER_MAP; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LEP_APPEAL_DECISION + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LEP_APPEAL_DECISION.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS LEP_APPEAL_DECISION( + PRSN_ID BIGINT NOT NULL, + LEP_APL_ID BIGINT NOT NULL, + LEP_APL_DEC BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + APL_DEC_DT DATE, + LEP_APL_DEC_REF_ID BIGINT NOT NULL, + RCV_DT DATE, + MAIL_NOTICE_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,LEP_APL_ID,LEP_APL_DEC) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO LEP_APPEAL_DECISION SELECT * FROM staging_LEP_APPEAL_DECISION; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_DETAIL + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_DETAIL.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS LETTER_DETAIL( + PRSN_ID BIGINT NOT NULL, + LTR_ID BIGINT NOT NULL, + LTR_DTL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,LTR_ID,LTR_DTL_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO LETTER_DETAIL SELECT * FROM staging_LETTER_DETAIL; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_JOB + USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_JOB.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS LETTER_JOB( + PRSN_ID BIGINT NOT NULL, + LTR_DTL_ID BIGINT NOT NULL, + LTR_JOB_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SRC_REF_ID BIGINT, + LTR_SRC_NUM VARCHAR(5), + QA_DT DATE, + INBOUND_SRC_REF_ID BIGINT, + LTR_INBOUND_SRC_NUM VARCHAR(5), + PG_CNT INTEGER, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,LTR_DTL_ID,LTR_JOB_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO LETTER_JOB SELECT * FROM staging_LETTER_JOB; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_REGISTER + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_REGISTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS LETTER_REGISTER( + PRSN_ID BIGINT NOT NULL, + LTR_ID BIGINT NOT NULL, + LTR_RGSTR_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + EXHIBIT_ID BIGINT NOT NULL, + RQST_DT DATE, + TGR_DT DATE, + PRT_DT DATE, + MAIL_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,LTR_ID,LTR_RGSTR_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO LETTER_REGISTER SELECT * FROM staging_LETTER_REGISTER; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_LETTER_WORK_ITEM + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/LETTER_WORK_ITEM.dat', header 'true', inferSchema 'true', nullValue 
'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS LETTER_WORK_ITEM( + PRSN_ID BIGINT NOT NULL, + LTR_RGSTR_ID BIGINT NOT NULL, + PRSN_WRK_ITM_ID BIGINT NOT NULL, + LTR_WRK_ITM_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + WRK_ITM_EXIBIT_EFF_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,LTR_RGSTR_ID,PRSN_WRK_ITM_ID,LTR_WRK_ITM_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO LETTER_WORK_ITEM SELECT * FROM staging_LETTER_WORK_ITEM; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT_STATUS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_EVENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS( + PRSN_EVNT_ID BIGINT NOT NULL, + PRSN_EVNT_STAT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + STAT_REF_ID BIGINT NOT NULL, + EVNT_STAT_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_EVNT_ID,PRSN_EVNT_STAT_ID) + ) USING row OPTIONS(partition_by 'PRSN_EVNT_ID', buckets '32',redundancy '1'); + INSERT INTO PERSON_EVENT_STATUS SELECT * FROM staging_PERSON_EVENT_STATUS; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_EVENT_STATUS_REASON + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_EVENT_STATUS_REASON.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_EVENT_STATUS_REASON( + PRSN_EVNT_STAT_ID BIGINT NOT NULL, + PRSN_EVNT_STAT_RSN_ID 
BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + RSN_REF_ID BIGINT NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_EVNT_STAT_ID,PRSN_EVNT_STAT_RSN_ID) + ) USING row OPTIONS(partition_by 'PRSN_EVNT_STAT_ID',buckets '32',redundancy '1'); + INSERT INTO PERSON_EVENT_STATUS_REASON SELECT * FROM staging_PERSON_EVENT_STATUS_REASON; + + + ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_LEP_PROFILE_RECORD + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_LEP_PROFILE_RECORD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_LEP_PROFILE_RECORD( + PRSN_ID BIGINT NOT NULL, + PRSN_LEP_PRFL_ID BIGINT NOT NULL, + PRSN_LEP_PRFL_REC_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + LEP_REC_TYP_REF_ID BIGINT NOT NULL, + CREDITABLE_COVRG_FLG INT, + INCR_UNCOVERED_MOS INTEGER, + PRIOR_PLAN_UNCOVERED_MOS INTEGER, + ATTEST_RSPNS_TYP_REF_ID BIGINT, + ELIG_STRT_DT DATE, + END_OF_IEP DATE, + GAP_STRT_DT DATE, + GAP_END_DT DATE, + BEQ_RSPNS_DT DATE, + RSPNS_TO_ATTEST_DT DATE, + LEP_CMS_CONFIRM_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_LEP_PRFL_ID,PRSN_LEP_PRFL_REC_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO PERSON_LEP_PROFILE_RECORD SELECT * FROM staging_PERSON_LEP_PROFILE_RECORD; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_CATEGORY + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM_CATEGORY.dat', header 'true', inferSchema 'true', nullValue 
'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PREMIUM_CATEGORY( + GRP_ID BIGINT NOT NULL, + PREM_CAT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + PREM_TBL_ID BIGINT NOT NULL, + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,GRP_ID,PREM_CAT_ID) + ) USING row OPTIONS(partition_by 'GRP_ID',buckets '32',redundancy '1'); + INSERT INTO PREMIUM_CATEGORY SELECT * FROM staging_PREMIUM_CATEGORY; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_PART + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM_PART.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PREMIUM_PART( + GRP_ID BIGINT NOT NULL, + PREM_PART_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + PREM_ID BIGINT, + SBSDY_LEVEL_REF_ID BIGINT, + RESP_PRTY_REF_ID BIGINT NOT NULL, + SPLT_METHOD_REF_ID BIGINT NOT NULL, + PREM_PART_AMT NUMERIC(19,4), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,GRP_ID,PREM_PART_ID) + ) USING row OPTIONS(partition_by 'GRP_ID',buckets '32',redundancy '1'); + INSERT INTO PREMIUM_PART SELECT * FROM staging_PREMIUM_PART; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PREMIUM_TABLE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PREMIUM_TABLE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PREMIUM_TABLE( + GRP_ID BIGINT NOT NULL, + BENE_GRP_ID BIGINT NOT NULL, + PREM_TBL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT 
NULL, + PREM_TYP_REF_ID BIGINT NOT NULL, + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,GRP_ID,BENE_GRP_ID,PREM_TBL_ID) + ) USING row OPTIONS(partition_by 'GRP_ID',buckets '32',redundancy '1'); + INSERT INTO PREMIUM_TABLE SELECT * FROM staging_PREMIUM_TABLE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_REVIEW + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_INPATIENT_REVIEW.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS UM_INPATIENT_REVIEW( + UM_RVW_ID BIGINT NOT NULL, + UM_INPT_RVW_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SEQ_NUM INTEGER, + UM_DIAG_SET_ID BIGINT, + CRT_DT DATE, + TRMT_TYP VARCHAR(15), + RVW_MTHD_REF_ID BIGINT, + RQST_STAY_LEN INTEGER, + AUTH_STAY_LEN INTEGER, + STAY_LEN_RSN_REF_ID BIGINT, + REGN_STAY_LEN INTEGER, + AVG_STAY_LEN INTEGER, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + OPRN VARCHAR(15), + PRIMARY KEY (CLIENT_ID,UM_RVW_ID,UM_INPT_RVW_ID) + ) USING row OPTIONS(partition_by 'UM_RVW_ID',buckets '32',redundancy '1'); + INSERT INTO UM_INPATIENT_REVIEW SELECT * FROM staging_UM_INPATIENT_REVIEW; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_STATUS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_INPATIENT_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS UM_INPATIENT_STATUS( + UM_RVW_ID BIGINT NOT NULL, + UM_INPT_STAT BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SEQ_NUM INTEGER, + 
USR_ID VARCHAR(15), + UM_INPT_STAT_CD VARCHAR(15), + CRT_DT DATE, + UM_STAT_RSN_REF_ID BIGINT, + SRC_SYS_REC_ID VARCHAR(15), + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + OPRN VARCHAR(15), + PRIMARY KEY (CLIENT_ID,UM_RVW_ID,UM_INPT_STAT) + ) USING row OPTIONS(partition_by 'UM_RVW_ID',buckets '32',redundancy '1' ); + INSERT INTO UM_INPATIENT_STATUS SELECT * FROM staging_UM_INPATIENT_STATUS; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_SERVICE_OVERRIDE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_SERVICE_OVERRIDE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS UM_SERVICE_OVERRIDE( + UM_RVW_ID BIGINT NOT NULL, + UM_SERV_ID BIGINT NOT NULL, + UM_SERV_OVRD_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SEQ_NUM INTEGER, + OVRD_IND VARCHAR(15), + OVRD_AMT NUMERIC(14,2), + OVRD_VAL VARCHAR(15), + OVRD_DT DATE, + EXPLAIN_CD_ID BIGINT, + USR_ID VARCHAR(15), + SRC_SYS_REC_ID VARCHAR(15), + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + OPRN VARCHAR(15), + PRIMARY KEY (CLIENT_ID,UM_RVW_ID,UM_SERV_ID,UM_SERV_OVRD_ID) + ) USING row OPTIONS(partition_by 'UM_RVW_ID',buckets '32',redundancy '1') ; +INSERT INTO UM_SERVICE_OVERRIDE SELECT * FROM staging_UM_SERVICE_OVERRIDE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BANK_ACCOUNT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BANK_ACCOUNT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS BANK_ACCOUNT( + BNK_ORG_ID BIGINT NOT NULL, + BNK_ID BIGINT NOT NULL, + BNK_ACCT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + 
ACCT_NM VARCHAR(10), + ACCT_NUM VARCHAR(5) NOT NULL, + ACCT_TYP_REF_ID BIGINT NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BNK_ORG_ID,BNK_ID,BNK_ACCT_ID) + ) USING row OPTIONS(partition_by 'BNK_ORG_ID',buckets '32',redundancy '1' ); + INSERT INTO BANK_ACCOUNT SELECT * FROM staging_BANK_ACCOUNT; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UM_INPATIENT_STAY_LENGTH + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UM_INPATIENT_STAY_LENGTH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS UM_INPATIENT_STAY_LENGTH( + UM_RVW_ID BIGINT NOT NULL, + UM_INPT_RVW_ID BIGINT NOT NULL, + UM_INPT_STAY_LEN_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SEQ_NUM INTEGER, + RQST_STAY_LEN INTEGER, + AUTH_STAY_LEN INTEGER, + RM_TYP VARCHAR(15), + SERV_CD VARCHAR(15), + SERV_RULE_CD VARCHAR(15), + SERV_PRC_CD VARCHAR(15), + CHRG_AMT NUMERIC(14,2), + CONTR_AMT NUMERIC(14,2), + PRC_AMT NUMERIC(14,2), + EXPLAIN_CD_ID BIGINT, + RVW_DENIAL_RSN_REF_ID BIGINT, + DENIED_BY_USR_ID VARCHAR(15), + PD_DAYS INTEGER, + ALWD_DAYS INTEGER, + DISALLOW_EXPLAIN BIGINT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,UM_RVW_ID,UM_INPT_RVW_ID,UM_INPT_STAY_LEN_ID) + ) USING row OPTIONS(partition_by 'UM_RVW_ID',buckets '32',redundancy '1'); + INSERT INTO UM_INPATIENT_STAY_LENGTH SELECT * FROM staging_UM_INPATIENT_STAY_LENGTH; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_REVENUE_CODE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/REVENUE_CODE.dat', header 'true', inferSchema 
'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS REVENUE_CODE( + REV_CD_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + REV_CD VARCHAR(15), + EFF_DT DATE, + END_DT DATE, + DESCR VARCHAR(15), + REV_CD_ACT VARCHAR(15), + EXPLAIN_CD_ID BIGINT, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,REV_CD_ID) + ) USING row OPTIONS(partition_by 'REV_CD_ID', buckets '32',redundancy '1'); + INSERT INTO REVENUE_CODE SELECT * FROM staging_REVENUE_CODE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_SERVICE_CODE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/SERVICE_CODE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS SERVICE_CODE( + SERV_CD_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SERV_CD VARCHAR(15), + DESCR VARCHAR(15), + SERV_TYP VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,SERV_CD_ID) + ) USING row OPTIONS(partition_by 'SERV_CD_ID', buckets '32',redundancy '1'); + INSERT INTO SERVICE_CODE SELECT * FROM staging_SERVICE_CODE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_AGREEMENT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/AGREEMENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS AGREEMENT( + AGREE_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + AGREE_CD VARCHAR(15), + DESCR VARCHAR(15), + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID 
VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,AGREE_ID) + ) USING row OPTIONS(partition_by 'AGREE_ID', buckets '32',redundancy '1'); + INSERT INTO AGREEMENT SELECT * FROM staging_AGREEMENT; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_ORGANIZATION_EVENT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/ORGANIZATION_EVENT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS ORGANIZATION_EVENT( + ORG_ID BIGINT NOT NULL, + ORG_EVNT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + ORG_EVNT_TYP_REF_ID BIGINT NOT NULL, + EVNT_DESCR VARCHAR(15), + EFF_DT DATE, + EXPR_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REC_ID VARCHAR(15), + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + PRIMARY KEY (CLIENT_ID,ORG_ID,ORG_EVNT_ID) + ) USING row OPTIONS(partition_by 'ORG_ID', buckets '32',redundancy '1'); + INSERT INTO ORGANIZATION_EVENT SELECT * FROM staging_ORGANIZATION_EVENT; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_TX_IDCARD + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FDI_TX_IDCARD.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS FDI_TX_IDCARD + ( + ACN_ID BIGINT NOT NULL, + COMP_ID BIGINT NOT NULL, + PRSN_ID BIGINT NOT NULL, + CD_VAL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + DOCUMENT_ID VARCHAR(10) NOT NULL, + DCOUMENT_DESC VARCHAR(15) NOT NULL, + INBD_MSSP_VALUE VARCHAR(15), + FLRQ_PRVN_MAIL_DATE date, + INBD_PRCS_ID VARCHAR(10) NOT NULL, + STR_LN1 VARCHAR(10), + STR_LN2 VARCHAR(10), + STR_LN3 VARCHAR(10), + CTY VARCHAR(7), + ST VARCHAR(5), + PSTL_CD VARCHAR(2), + ORG_ID BIGINT NOT NULL, + ORG_DESC 
VARCHAR(15), + FLRQ_PREPRINT_DCN VARCHAR(5), + FLRQ_POSTPRINT_DCN VARCHAR(5), + FLRQ_MSSP_WF_STS VARCHAR(10), + FLRQ_LAST_UPD_DATE date, + FLHD_CREATE_DATE date, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (ACN_ID,COMP_ID,PRSN_ID,CD_VAL_ID,CLIENT_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO FDI_TX_IDCARD SELECT * FROM staging_FDI_TX_IDCARD; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_TX_LETTER + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FDI_TX_LETTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS FDI_TX_LETTER + ( + ACN_ID BIGINT NOT NULL, + COMP_ID BIGINT NOT NULL, + PRSN_ID BIGINT NOT NULL, + CD_VAL_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + DOCUMENT_ID VARCHAR(10) NOT NULL, + DCOUMENT_DESC VARCHAR(15) NOT NULL, + INBD_MSSP_VALUE VARCHAR(15), + FLRQ_PRVN_MAIL_DATE date, + INBD_PRCS_ID VARCHAR(10) NOT NULL, + COB_IND INT , + ADDRESS1 VARCHAR(10), + ADDRESS2 VARCHAR(10), + CTY VARCHAR(7), + ST VARCHAR(5), + PSTL_CD VARCHAR(2), + FLRQ_PREPRINT_DCN VARCHAR(5), + FLRQ_POSTPRINT_DCN VARCHAR(5), + FLRQ_LAST_UPD_DATE date, + FLHD_CREATE_DATE date, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (ACN_ID,COMP_ID,PRSN_ID,CD_VAL_ID,CLIENT_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1'); + INSERT INTO FDI_TX_LETTER SELECT * FROM staging_FDI_TX_LETTER; + + ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BENEFIT_PACKAGE_ATTRIBUTE + USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BENEFIT_PACKAGE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS BENEFIT_PACKAGE_ATTRIBUTE( + BENE_PKG_ID BIGINT NOT NULL, + BENE_PKG_ATTR_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + EOB_FLG VARCHAR(15), + PCP_REQT_FLG VARCHAR(15), + USE_ADM_FLG VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BENE_PKG_ID,BENE_PKG_ATTR_ID) + ) USING row OPTIONS(partition_by 'BENE_PKG_ID', buckets '32',redundancy '1'); + INSERT INTO BENEFIT_PACKAGE_ATTRIBUTE SELECT * FROM staging_BENEFIT_PACKAGE_ATTRIBUTE; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_ENTITY_SCHEDULE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_ENTITY_SCHEDULE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS BILLING_ENTITY_SCHEDULE( + BILL_ENT_ID BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + BILL_SCHD_ID BIGINT NOT NULL, + BILL_ENT_SCHD_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + DUE_DT DATE, + END_DT DATE, + BILL_DT DATE, + BILL_AMT NUMERIC(38,8), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BILL_ENT_ID,BILL_SCHD_ID,BILL_ENT_SCHD_ID) + ) USING row OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',redundancy '1'); + INSERT INTO BILLING_ENTITY_SCHEDULE SELECT * FROM staging_BILLING_ENTITY_SCHEDULE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_SCHEDULE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_SCHEDULE.dat', header 
'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS BILLING_SCHEDULE( + BILL_SCHD_ID BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + SCHD_SRC BIGINT NOT NULL, + ACCT_PRD_STRT DATE, + ACCT_PRD_END DATE, + BILL_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BILL_SCHD_ID) + ) USING row OPTIONS(partition_by 'BILL_SCHD_ID', buckets '32',redundancy '1'); + INSERT INTO BILLING_SCHEDULE SELECT * FROM staging_BILLING_SCHEDULE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_BILLING_SOURCE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/BILLING_SOURCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS BILLING_SOURCE( + BILL_ENT_ID BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + SRC_TYP_REF_ID BIGINT NOT NULL, + BILL_SRC_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BILL_ENT_ID,SRC_TYP_REF_ID,BILL_SRC_ID) + ) USING row OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',redundancy '1'); + INSERT INTO BILLING_SOURCE SELECT * FROM staging_BILLING_SOURCE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CHARGE_ITEM + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CHARGE_ITEM.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS CHARGE_ITEM( + BILL_ENT_ID BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + BILL_ENT_SCHD_ID BIGINT NOT NULL, + CHRG_ITM_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CHRG_ITYP_REF_ID BIGINT NOT 
NULL, + PREM_PART_ID BIGINT, + INV_ID BIGINT, + END_DT DATE, + INV_DT DATE, + CHRG_ITM_AMT NUMERIC(38,8), + POST_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BILL_ENT_ID,BILL_ENT_SCHD_ID,CHRG_ITM_ID) + ) USING row OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',redundancy '1'); + INSERT INTO CHARGE_ITEM SELECT * FROM staging_CHARGE_ITEM; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_COSHARE_TRACKING + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CLAIM_COSHARE_TRACKING.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS CLAIM_COSHARE_TRACKING( + PRSN_ID BIGINT NOT NULL, + VER BIGINT, + CLIENT_ID BIGINT NOT NULL, + CLAIM_ID BIGINT NOT NULL, + LINE_NO BIGINT NOT NULL, + CLCL_ID VARCHAR(15), + ORG_COPAY_AMT NUMERIC(18,4), + FAUX_COPAY_AMT NUMERIC(18,4), + COST_SHARE_AMT NUMERIC(18,4), + ORIG_COPAY_AMT NUMERIC(18,4), + ORIG_COINS_AMT NUMERIC(18,4), + ORIG_DED_AMT NUMERIC(18,4), + ADJ_TO_CLM_ID BIGINT, + ADJ_FROM_CLM_ID BIGINT, + CALC_BY_ADJ_REF_ID INTEGER, + COPAY_CALC_DT date, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(10), + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID,CLAIM_ID,LINE_NO) + ) USING row OPTIONS(partition_by 'PRSN_ID',buckets '32',redundancy '1'); + INSERT INTO CLAIM_COSHARE_TRACKING SELECT * FROM staging_CLAIM_COSHARE_TRACKING; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_CLAIM_LINE_ATTRIBUTE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/CLAIM_LINE_ATTRIBUTE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS CLAIM_LINE_ATTRIBUTE( + PRSN_ID BIGINT NOT NULL, + 
CLM_DTL_ID BIGINT NOT NULL, + CLM_LN_ATTR_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + CLIENT_ATTR_TYP VARCHAR(15), + CLIENT_ATTR_NUM INTEGER, + ATTR_TYP_REF_ID BIGINT NOT NULL, + NMRC_ATTR NUMERIC(38,8), + TXT_ATTR VARCHAR(15), + TS_ATTR date, + VLD_FRM_DT VARCHAR(15) NOT NULL, + VLD_TO_DT VARCHAR(15), + SRC_SYS_REC_ID VARCHAR(15), + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + PRIMARY KEY (CLIENT_ID,PRSN_ID,CLM_DTL_ID,CLM_LN_ATTR_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1'); + INSERT INTO CLAIM_LINE_ATTRIBUTE SELECT * FROM staging_CLAIM_LINE_ATTRIBUTE; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_FDI_CORRESPONDENCE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/FDI_CORRESPONDENCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS FDI_CORRESPONDENCE + ( + CLIENT_ID BIGINT NOT NULL, + ENTITY_ID BIGINT NOT NULL, + DCN_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CORR_NUM_POST_PRINT VARCHAR(10) NOT NULL, + CORR_TYP_VAL VARCHAR(10) NOT NULL, + CORR_TYP_DESCR VARCHAR(500) NOT NULL, + LAST_ACT_TS date, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID, ENTITY_ID, DCN_ID) + ) USING row OPTIONS(partition_by 'CLIENT_ID, ENTITY_ID, DCN_ID', buckets '32',redundancy '1'); + INSERT INTO FDI_CORRESPONDENCE SELECT * FROM staging_FDI_CORRESPONDENCE; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INVOICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS INVOICE( + BILL_ENT_ID BIGINT NOT NULL, + CLIENT_ID BIGINT NOT 
NULL, + INV_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + PAY_OPTN_REF_ID BIGINT, + INIT_FLG INT, + INV_NUM VARCHAR(15), + DESCR VARCHAR(15), + INV_AMT NUMERIC(38,8), + MIN_DUE NUMERIC(38,8), + DUE_DT DATE, + INV_DT DATE, + CNTC_ID BIGINT, + EXTND_DUE_PRD INTEGER, + CUR_DRCT_BAL NUMERIC(38,8), + CUR_SSA_BAL NUMERIC(38,8), + PRIOR_DRCT_BAL NUMERIC(38,8), + PRIOR_SSA_BAL NUMERIC(38,8), + ADD_ACH_AMT NUMERIC(38,8), + ELIG_EXPR_DT DATE, + DECD_CNT INTEGER, + TERM_CNT INTEGER, + CNCL_DT DATE, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BILL_ENT_ID,INV_ID) + ) USING row OPTIONS(partition_by 'BILL_ENT_ID', buckets '32',redundancy '1'); + INSERT INTO INVOICE SELECT * FROM staging_INVOICE; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_INVOICE_STATUS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/INVOICE_STATUS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS INVOICE_STATUS( + INV_ID BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + INV_STAT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + INV_STAT_REF_ID BIGINT, + INV_STAT_DT DATE, + DESCR VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,INV_ID,INV_STAT_ID) + ) USING row OPTIONS(partition_by 'INV_ID',buckets '32',redundancy '1'); + INSERT INTO INVOICE_STATUS SELECT * FROM staging_INVOICE_STATUS; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_BALANCE_EXCEPTIONS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MOOP_BALANCE_EXCEPTIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE 
TABLE IF NOT EXISTS MOOP_BALANCE_EXCEPTIONS( + CLIENT_ID INTEGER NOT NULL, + VLD_FRM_DT DATE NOT NULL, + VER BIGINT, + SUPPL_ID VARCHAR(15), + MEMBER_ID VARCHAR(15) NOT NULL, + QTR_EFF_DT DATE, + QTR_EXPR_DT DATE, + QTR_PREM_PD_AMT NUMERIC(38,8), + FAUX_COPAY_BAL_AMT NUMERIC(38,8), + COST_SHARE_MAX_AMT NUMERIC(38,8), + COST_SHARE_BAL_AMT NUMERIC(38,8), + VLD_TO_DT DATE, + SRC_SYS_REF_ID VARCHAR(15), + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,MEMBER_ID) + ) USING row OPTIONS(partition_by 'MEMBER_ID', buckets '32',redundancy '1'); + INSERT INTO MOOP_BALANCE_EXCEPTIONS SELECT * FROM staging_MOOP_BALANCE_EXCEPTIONS; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_BALANCE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MOOP_BALANCE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS MOOP_BALANCE( + PRSN_ID BIGINT NOT NULL, + CLIENT_ID INTEGER NOT NULL, + FAMILY_ID BIGINT NOT NULL, + VLD_FRM_DT DATE NOT NULL, + VER BIGINT, + SUPPL_ID VARCHAR(15), + MEMBER_ID VARCHAR(15), + QTR_EFF_DT DATE, + QTR_EXPR_DT DATE, + QTR_PREM_PD_AMT NUMERIC(38,8), + FAUX_COPAY_BAL_AMT NUMERIC(38,8), + COST_SHARE_MAX_AMT NUMERIC(38,8), + COST_SHARE_BAL_AMT NUMERIC(38,8), + VLD_TO_DT DATE, + SRC_SYS_REF_ID VARCHAR(15), + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PRSN_ID) + ) USING row OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1'); + INSERT INTO MOOP_BALANCE SELECT * FROM staging_MOOP_BALANCE; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_MOOP_ACCUMULATOR + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/MOOP_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS MOOP_ACCUMULATOR ( 
+SUPPL_ID VARCHAR(15) NOT NULL, + CLIENT_ID INTEGER NOT NULL, + FAMILY_ID BIGINT NOT NULL, + VER BIGINT, + COST_SHARE_MAX_AMT NUMERIC(38,8), + QTR_PREM_PD_AMT NUMERIC(38,8), + FAUX_COPAY_BAL_AMT NUMERIC(38,8), + COST_SHARE_BAL_AMT NUMERIC(38,8), + VLD_FRM_DT DATE NOT NULL, + VLD_TO_DT DATE, + SRC_SYS_REF_ID VARCHAR(15), + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,FAMILY_ID) +) USING row OPTIONS(partition_by 'CLIENT_ID,FAMILY_ID', buckets '32',redundancy '1' ); + INSERT INTO MOOP_ACCUMULATOR SELECT * FROM staging_MOOP_ACCUMULATOR; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PERSON_ACCUMULATOR + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PERSON_ACCUMULATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS PERSON_ACCUMULATOR( + PRSN_ID BIGINT NOT NULL, + PRSN_ACCUM_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + GRP_ID BIGINT, + ACCUM_TYP VARCHAR(15), + ACCUM_CD VARCHAR(15), + ACCUM_DT DATE, + AMT_1 NUMERIC(38,8), + AMT_2 NUMERIC(38,8), + AMT_3 NUMERIC(38,8), + AMT_4 NUMERIC(38,8), + AMT_5 NUMERIC(38,8), + AMT_6 NUMERIC(38,8), + AMT_7 NUMERIC(38,8), + AMT_8 NUMERIC(38,8), + AMT_9 NUMERIC(38,8), + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + PRIMARY KEY (CLIENT_ID,PRSN_ID,PRSN_ACCUM_ID) +) USING row OPTIONS(partition_by 'PRSN_ID', buckets '32',redundancy '1'); + INSERT INTO PERSON_ACCUMULATOR SELECT * FROM staging_PERSON_ACCUMULATOR; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_PROCEDURE_PRICE + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/PROCEDURE_PRICE.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE 
TABLE IF NOT EXISTS PROCEDURE_PRICE( + PR_CD_ID BIGINT NOT NULL, + PR_PRC_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + PRC_CD VARCHAR(15), + PSTL_CD VARCHAR(15), + EFF_DT DATE, + EXPR_DT DATE, + RT_AMT NUMERIC(14,2), + DISC_PCTG NUMERIC(14,2), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,PR_CD_ID,PR_PRC_ID) + ) USING row OPTIONS(partition_by 'PR_CD_ID', buckets '32',redundancy '1'); + INSERT INTO PROCEDURE_PRICE SELECT * FROM staging_PROCEDURE_PRICE; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_RECEIPT + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/RECEIPT.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS RECEIPT( + BILL_ENT_ID BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + RCPT_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + WHLD_TYP_REF_ID BIGINT, + RCPT_TYP_REF_ID BIGINT, + RCPT_TYP_CD VARCHAR(15), + RCPT_NUM VARCHAR(15), + RCPT_REC_DT DATE, + RCPT_UPD_DT DATE, + RCPT_AMT NUMERIC(38,8), + ACCT_NUM VARCHAR(15), + CHK_NUM VARCHAR(15), + RCPT_DT DATE, + BATCH_ID VARCHAR(15), + PREM_STRT_DT DATE, + PREM_END_DT DATE, + PRD_MO_CNT INTEGER, + POST_DT DATE, + CONFIRM_NUM VARCHAR(15), + RCPT_STAT_REF_ID BIGINT, + RCPT_STAT_CD VARCHAR(15), + RCPT_RSN_REF_ID BIGINT, + DESCR VARCHAR(15), + BENE_PKG_ID BIGINT, + GRP_ID BIGINT, + PRSN_ID BIGINT, + SRC_TYP_REF_ID BIGINT, + BILL_SRC_ID BIGINT, + RCPT_REVERSAL_IND VARCHAR(15), + USR_ID VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,BILL_ENT_ID,RCPT_ID) + ) USING row OPTIONS(partition_by 'BILL_ENT_ID',buckets '32',redundancy '1'); + INSERT INTO RECEIPT SELECT * FROM staging_RECEIPT; + + +----- CREATE TEMPORARY STAGING 
TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PLAN_CODE_CONFIG + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_PLAN_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS TMGSERVICES_PLAN_CODE_CONFIG + ( + PLAN_CODE_ID INT NOT NULL, + CLIENT_REF_ID INT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + PLAN_CODE VARCHAR(5) NULL, + GROUP_ID VARCHAR(5) NULL, + PLAN_NAME VARCHAR(10) NULL, + INSTANT_TRANSFER CHAR(1) NULL, + BENEFITS_SELF_SERVICABLE CHAR(1) NULL, + CLAIMS_SELF_SERVICABLE CHAR(1) NULL, + ELIGIBILITY_SELF_SERVICABLE CHAR(1) NULL, + ID_CARD_SELF_SERVICABLE CHAR(1) NULL, + MAX_CLAIMS_FOR_SELF_SERVICE INT NULL, + VALID_DATE_RANGE_FOR_CLAIMS INT NULL, + DENTAL_BENEFITS_SELF_SERVICABLE CHAR(1) NULL, + BEHAV_HEALTH_BENEFITS_SELF_SERVICABLE CHAR(1) NULL, + MEDICAL_BENEFITS_SELF_SERVICABLE CHAR(1) NULL, + PRESCRIPTION_BENEFITS_SELF_SERVICABLE CHAR(1) NULL, + VISION_BENEFITS_SELF_SERVICABLE CHAR(1) NULL, + CREATE_date date NULL, + UPDATED_date date NULL, + USER_NAME VARCHAR(15) NULL, + VLD_FRM_DT date NULL, + VLD_TO_DT date NULL, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15) NULL, + PRIMARY KEY (PLAN_CODE_ID,CLIENT_ID) + ) USING row OPTIONS(partition_by 'PLAN_CODE_ID,CLIENT_ID', buckets '32',redundancy '1'); + INSERT INTO TMGSERVICES_PLAN_CODE_CONFIG SELECT * FROM staging_TMGSERVICES_PLAN_CODE_CONFIG; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_KEY_GENERATOR + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_KEY_GENERATOR.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS TMGSERVICES_KEY_GENERATOR + ( + KEY_GEN_ID INT NOT NULL, + CLIENT_REF_ID 
INT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + KEY_GEN_FORMAT VARCHAR(5) NULL, + APPLICATION_CODE VARCHAR(5) NULL, + PROCESS_CODE VARCHAR(5) NULL, + CREATE_date date NULL, + UPDATED_date date NULL, + USER_NAME VARCHAR(15) NULL, + VLD_FRM_DT date NULL, + VLD_TO_DT date NULL, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15) NULL, + PRIMARY KEY (KEY_GEN_ID,CLIENT_ID) + ) USING row OPTIONS(partition_by 'KEY_GEN_ID,CLIENT_ID', buckets '32',redundancy '1' ); + INSERT INTO TMGSERVICES_KEY_GENERATOR SELECT * FROM staging_TMGSERVICES_KEY_GENERATOR; + + + ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_GROUP_CODE_CONFIG + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_GROUP_CODE_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); + CREATE TABLE IF NOT EXISTS TMGSERVICES_GROUP_CODE_CONFIG + ( + GROUP_ID INT NOT NULL, + CLIENT_REF_ID INT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + GROUP_CODE VARCHAR(10) NULL, + GROUP_NAME VARCHAR(10) NULL, + INSTANT_TRANSFER CHAR(1) NULL, + CREATE_date date NULL, + UPDATED_date date NULL, + USER_NAME VARCHAR(15) NULL, + VLD_FRM_DT date NULL, + VLD_TO_DT date NULL, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15) NULL, + PRIMARY KEY (GROUP_ID,CLIENT_ID) + ) + USING row OPTIONS(partition_by 'GROUP_ID,CLIENT_ID', buckets '32',redundancy '1'); + INSERT INTO TMGSERVICES_GROUP_CODE_CONFIG SELECT * FROM staging_TMGSERVICES_GROUP_CODE_CONFIG; + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DNIS_CONFIG + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_DNIS_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE 
TABLE IF NOT EXISTS TMGSERVICES_DNIS_CONFIG + ( + DNIS_ID INT NOT NULL, + CLIENT_REF_ID INT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + DNIS_VALUE VARCHAR(15) NULL, + CORRESPONDENCE_ADDRESS VARCHAR(15) NULL, + BENEFITS_DISCLAIMER VARCHAR(15) NULL, + ELIGIBILITY_DISCLAIMER VARCHAR(15) NULL, + CREATE_date date NULL, + UPDATED_date date NULL, + USER_NAME VARCHAR(15) NULL, + VLD_FRM_DT date NULL, + VLD_TO_DT date NULL, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15) NULL, + PRIMARY KEY (DNIS_ID,CLIENT_ID) + ) USING row OPTIONS(partition_by 'DNIS_ID,CLIENT_ID', buckets '32',redundancy '1'); + INSERT INTO TMGSERVICES_DNIS_CONFIG SELECT * FROM staging_TMGSERVICES_DNIS_CONFIG; + + ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_DENIAL_REASON_CONFIG + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_DENIAL_REASON_CONFIG.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS TMGSERVICES_DENIAL_REASON_CONFIG + ( + DENIAL_REASON_ID INT NOT NULL, + CLIENT_REF_ID INT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + DENIAL_CODE VARCHAR(10) NULL, + DENIAL_REASON VARCHAR(15) NULL, + CREATE_date date NULL, + UPDATED_date date NULL, + USER_NAME VARCHAR(15) NULL, + VLD_FRM_DT date NULL, + VLD_TO_DT date NULL, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15) NULL, + PRIMARY KEY (DENIAL_REASON_ID,CLIENT_ID) + ) USING row OPTIONS(partition_by 'DENIAL_REASON_ID,CLIENT_ID', buckets '32',redundancy '1'); + INSERT INTO TMGSERVICES_DENIAL_REASON_CONFIG SELECT * FROM staging_TMGSERVICES_DENIAL_REASON_CONFIG; + + + ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_CLIENT_MASTER + USING com.databricks.spark.csv OPTIONS (path 
'/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_CLIENT_MASTER.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS TMGSERVICES_CLIENT_MASTER + ( + CLIENT_REF_ID INT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + CLIENT_LEGACY_CD VARCHAR(5) NULL, + CLIENT_NAME VARCHAR(10) NULL, + MEMBER_ID_FORMAT VARCHAR(15) NULL, + TMG_CALL_CLIENT_CODE VARCHAR(10) NULL, + CREATE_date date NULL, + UPDATED_date date NULL, + USER_NAME VARCHAR(15) NULL, + VLD_FRM_DT date NULL, + VLD_TO_DT date NULL, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15) NULL, + PRIMARY KEY (CLIENT_REF_ID,CLIENT_ID) + ) USING row OPTIONS(partition_by 'CLIENT_REF_ID,CLIENT_ID', buckets '32',redundancy '1'); + INSERT INTO TMGSERVICES_CLIENT_MASTER SELECT * FROM staging_TMGSERVICES_CLIENT_MASTER; + + ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS + ( + SUBJ_CAT_ID INT NOT NULL, + CLIENT_REF_ID INT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + IVR_MODULE VARCHAR(10) NULL, + SUBJECT_CODE VARCHAR(5) NULL, + IVR_STATE VARCHAR(15) NULL, + CATEGORY_CODE VARCHAR(5) NULL, + CUSTOM_NOTES VARCHAR(20) NULL, + PTMR_TRIGGER CHAR(1) NULL, + SUMMARY_NOTES VARCHAR(15) NULL, + CREATE_date date NULL, + UPDATED_date date NULL, + USER_NAME VARCHAR(15) NULL, + VLD_FRM_DT date NULL, + VLD_TO_DT date NULL, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15) NULL, + PRIMARY KEY (SUBJ_CAT_ID,CLIENT_ID) + ) USING row OPTIONS(partition_by 'SUBJ_CAT_ID,CLIENT_ID', buckets '32',redundancy '1'); + 
INSERT INTO TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS SELECT * FROM staging_TMGSERVICES_SUBJECT_CATEGORY_TRANSLATIONS; + + + ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS + ( + VARIABLE_ID INT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + VARIABLE_NAME VARCHAR(5) NULL, + VARIABLE_TYPE VARCHAR(5) NULL, + DESCRIPTION VARCHAR(15) NULL, + CREATE_date date NULL, + UPDATED_date date NULL, + USER_NAME VARCHAR(15) NULL, + VLD_FRM_DT date NULL, + VLD_TO_DT date NULL, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15) NULL, + PRIMARY KEY (VARIABLE_ID) + ) + USING row OPTIONS(partition_by 'VARIABLE_ID', buckets '32',redundancy '1'); + INSERT INTO TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS SELECT * FROM staging_TMGSERVICES_PTMR_VARIABLE_TRANSLATIONS; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_TMGSERVICES_ACCOUNTING_CODES + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/TMGSERVICES_ACCOUNTING_CODES.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS TMGSERVICES_ACCOUNTING_CODES + ( + ACCOUNTING_CODE_ID INT NOT NULL, + CLIENT_REF_ID INT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + APPLICATION_CODE VARCHAR(5) NULL, + PROCESS_CODE VARCHAR(5) NULL, + ACCOUNTING_CODE VARCHAR(10) NULL, + ACCOUNTING_CODE_OPERATION CHAR(1) NULL, + ACCOUNTING_CODE_DESCRIPTION VARCHAR(250) NULL, + ACCOUNTING_CODE_CATEGORY VARCHAR(250) NULL, + COMPOUND_CODE VARCHAR(5) NULL, + 
ACCOUNTING_CODE_DEBIT_CREDIT VARCHAR(10) NULL, + CREATE_date date NOT NULL, + UPDATED_date date NULL, + USER_NAME VARCHAR(15) NOT NULL, + VLD_FRM_DT date NULL, + VLD_TO_DT date NULL, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15) NULL, + PRIMARY KEY (ACCOUNTING_CODE_ID,CLIENT_ID) + ) USING row OPTIONS(partition_by 'ACCOUNTING_CODE_ID,CLIENT_ID', buckets '32',redundancy '1'); + INSERT INTO TMGSERVICES_ACCOUNTING_CODES SELECT * FROM staging_TMGSERVICES_ACCOUNTING_CODES; + + +----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_UNAPPLIED_CASH + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/UNAPPLIED_CASH.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS UNAPPLIED_CASH( + UNAPP_CSH_ID BIGINT NOT NULL, + VER BIGINT NOT NULL, + CLIENT_ID BIGINT NOT NULL, + BILL_SRC_ID BIGINT, + SRC_TYP_REF_ID BIGINT, + STAT_REF_ID BIGINT, + ERR_TYP_REF_ID BIGINT, + CNCL_DT DATE, + ERR_DESCR VARCHAR(15), + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REF_ID VARCHAR(15) NOT NULL, + SRC_SYS_REC_ID VARCHAR(15), + PRIMARY KEY (CLIENT_ID,UNAPP_CSH_ID) + ) USING row OPTIONS(partition_by 'UNAPP_CSH_ID', buckets '32',redundancy '1'); + INSERT INTO UNAPPLIED_CASH SELECT * FROM staging_UNAPPLIED_CASH; + + ----- CREATE TEMPORARY STAGING TABLE TO LOAD CSV FORMATTED DATA ----- +CREATE EXTERNAL TABLE IF NOT EXISTS staging_WORK_GENERATED_KEYS + USING com.databricks.spark.csv OPTIONS (path '/export/shared/QA_DATA/TMG_Data/TMG_RowTable_Data/WORK_GENERATED_KEYS.dat', header 'true', inferSchema 'true', nullValue 'NULL', maxCharsPerColumn '4096'); +CREATE TABLE IF NOT EXISTS WORK_GENERATED_KEYS( + CLIENT_ID BIGINT NOT NULL, + GEN_KEY_ID BIGINT NOT NULL, + GEN_KEY_CODE VARCHAR(10) NOT NULL, + GEN_KEY_DESCRIPTION VARCHAR(15) NOT NULL, + GEN_KEY_MIN_LENGTH INT NOT NULL, + GEN_KEY_MAX_LENGTH INT NOT NULL, + 
GEN_KEY_PATTERN VARCHAR(10), + LAST_GEN_KEY_SEQ BIGINT NOT NULL, + GEN_KEY_CMPT_REF_DTA_ID BIGINT NOT NULL, + GEN_KEY_PAD VARCHAR(12) DEFAULT '0', + GEN_KEY_STRING VARCHAR(12), + GEN_KEY_RESET_VALUE INT DEFAULT 1, + VER BIGINT NOT NULL, + VLD_FRM_DT date NOT NULL, + VLD_TO_DT date, + SRC_SYS_REC_ID VARCHAR(15) NOT NULL, + SRC_SYS_REF_ID VARCHAR(15), + APP_CODE VARCHAR(5) NOT NULL, + MODULE_CODE VARCHAR(5) NOT NULL, + PRIMARY KEY (CLIENT_ID,GEN_KEY_ID) + ) USING row OPTIONS(partition_by 'GEN_KEY_ID', buckets '32',redundancy '1'); + INSERT INTO WORK_GENERATED_KEYS SELECT * FROM staging_WORK_GENERATED_KEYS; diff --git a/dtests/src/resources/scripts/cdcConnector/scriptsForApp2/tableList.txt b/dtests/src/resources/scripts/cdcConnector/scriptsForApp2/tableList.txt index eadbd558ac..239249474d 100644 --- a/dtests/src/resources/scripts/cdcConnector/scriptsForApp2/tableList.txt +++ b/dtests/src/resources/scripts/cdcConnector/scriptsForApp2/tableList.txt @@ -4,7 +4,7 @@ LINE_DISALLOW_EXPLANATION=LN_DISALLOW_EXPLAIN_ID; LINE_PROCEDURE_MODIFIER=LN_PR_MOD_ID; MARX_CALENDAR=MARX_CAL_ID; MOOP_ACCUMULATOR=FAMILY_ID; -MOOP_BALANCE=MEMBER_ID; +MOOP_BALANCE=PRSN_ID; MOOP_BALANCE_EXCEPTIONS=MEMBER_ID; NOTE=NTE_ID; ORGANIZATION_CODE=ORG_CD_ID; @@ -40,7 +40,7 @@ PREMIUM_TABLE=PREM_TBL_ID; PROCEDURE_CODE=PR_CD_ID; PROCEDURE_PRICE=PR_PRC_ID; RECEIPT=RCPT_ID; -REFERENCE_DATA=CLIENT_REF_DTA_ID; +REFERENCE_DATA=REF_DTA_ID; RETURNED_MAIL=RTRN_MAIL_ID; REVENUE_CODE=REV_CD_ID; SERVICE_CODE=SERV_CD_ID; diff --git a/dtests/src/test/java/io/snappydata/hydra/cdcConnector/SnappyCDCTest.java b/dtests/src/test/java/io/snappydata/hydra/cdcConnector/SnappyCDCTest.java index 582b98c327..57bbd8dfd5 100644 --- a/dtests/src/test/java/io/snappydata/hydra/cdcConnector/SnappyCDCTest.java +++ b/dtests/src/test/java/io/snappydata/hydra/cdcConnector/SnappyCDCTest.java @@ -3,11 +3,13 @@ import hydra.Log; import io.snappydata.hydra.cluster.SnappyTest; - import java.io.*; +import java.net.InetAddress; +import 
java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; import java.util.List; -import java.util.Properties; public class SnappyCDCTest extends SnappyTest { protected static SnappyCDCTest snappyCDCTest; @@ -15,6 +17,24 @@ public class SnappyCDCTest extends SnappyTest { public SnappyCDCTest() { } + + public static Connection getConnections() { + Connection conn = null; + List endpoints = validateLocatorEndpointData(); + String hostPort = endpoints.get(0); + String url = "jdbc:snappydata://" + hostPort; + Log.getLogWriter().info("url is " + url); + String driver = "io.snappydata.jdbc.ClientDriver"; + try { + Class.forName(driver); + conn = DriverManager.getConnection(url); + } catch (Exception ex) { + System.out.println("Caught exception in getConnection() method" + ex.getMessage()); + } + + return conn; + } + public static void HydraTask_runConcurrencyJob() { Log.getLogWriter().info("Inside HydraTask_runConcurrencyJob"); if (snappyCDCTest == null) { @@ -35,7 +55,9 @@ public static void HydraTask_closeStreamingJob() { snappyCDCTest = new SnappyCDCTest(); } try{ - curlCmd = "curl -d \"name="+appName+"&terminate=true\" -X POST http://pnq-spillai3:8080/app/killByName/"; + InetAddress myHost = InetAddress.getLocalHost(); + String hostName[] = myHost.toString().split("/"); + curlCmd = "curl -d \"name="+appName+"&terminate=true\" -X POST http://"+hostName[0]+":8080/app/killByName/"; Log.getLogWriter().info("The curlCmd is " + curlCmd); pb = new ProcessBuilder("/bin/bash", "-c", curlCmd); log = new File("."); @@ -72,67 +94,154 @@ public void runConcurrencyTestJob() { } } + public void deleteDiskStore() { + + } + + public static void performClusterRestartWithNewNode() { + String snappyPath = SnappyCDCPrms.getSnappyFileLoc(); + String nodeInfoforHA = SnappyCDCPrms.getNodeInfoforHA(); + File bkName = null; + File orgName = null; + String query = "SELECT count(*) from AIRLINE"; + try{ + Connection conn = getConnections(); + ResultSet rs = 
conn.createStatement().executeQuery(query); + Log.getLogWriter().info("Count value is " + rs.getInt(0)); + conn.close(); + ProcessBuilder pbStop = new ProcessBuilder(snappyPath + "/sbin/snappy-stop-all.sh"); + snappyTest.executeProcess(pbStop,null); + bkName = new File(snappyPath + "/conf/servers_bk"); + orgName = new File(snappyPath + "/conf/servers"); + if (orgName.renameTo(bkName)) { + Log.getLogWriter().info("File renamed to " + bkName); + } else { + Log.getLogWriter().info("Error while renaming file"); + } + File newServerConf = new File (snappyPath + "/conf/servers"); + FileWriter fw = new FileWriter(newServerConf,true); + fw.write(nodeInfoforHA); + FileReader reader = new FileReader(bkName); + BufferedReader bufferedReader = new BufferedReader(reader); + String line; + while ((line = bufferedReader.readLine()) != null) { + fw.write(line); + } + reader.close(); + FileReader reader1 = new FileReader(newServerConf); + BufferedReader bufferedReader1 = new BufferedReader(reader1); + String line1; + while ((line1 = bufferedReader1.readLine()) != null) { + System.out.println(line1); + } + reader1.close(); + + Thread.sleep(60000); + ProcessBuilder pbStart = new ProcessBuilder(snappyPath + "/sbin/snappy-start-all.sh"); + snappyTest.executeProcess(pbStart,null); + Thread.sleep(60000); + Connection conn1 = getConnections(); + ResultSet rs1 = conn1.createStatement().executeQuery(query); + Log.getLogWriter().info("Count value is " + rs1.getInt(0)); + conn1.close(); + + //Read the existing server config + } + catch(Exception e){} + } + public static void performHA() { File orgName = null; File bkName = null; File tempConfFile = null; + String scriptName = ""; try { String snappyPath = SnappyCDCPrms.getSnappyFileLoc(); Log.getLogWriter().info("snappyPath File path is " + snappyPath); String nodeType = SnappyCDCPrms.getNodeType(); - orgName = new File(snappyPath + "/conf/" + nodeType); - bkName = new File(snappyPath + "/conf/" + nodeType + "_bk"); - - String nodeInfoforHA = 
SnappyCDCPrms.getNodeInfoforHA(); - - //rename original conf file - if (orgName.renameTo(bkName)) { - Log.getLogWriter().info("File renamed to " + bkName); - } else { - Log.getLogWriter().info("Error"); - } - - //write a new file in conf - try { - tempConfFile = new File(snappyPath + "/conf/" + nodeType); - FileWriter fw = new FileWriter(tempConfFile); - fw.write(nodeInfoforHA); - fw.close(); + if (nodeType.equals("allNodes")) { File log = new File("."); - String dest = log.getCanonicalPath() + File.separator + "snappyServerSystem.log"; + String dest = log.getCanonicalPath() + File.separator + "clusterRestart.log"; Log.getLogWriter().info("The destination file is " + dest); File logFile = new File(dest); - ProcessBuilder pbStop = new ProcessBuilder(snappyPath + "/sbin/snappy-servers.sh", "stop"); - snappyTest.executeProcess(pbStop, logFile); - - Thread.sleep(30000); //sleep for 3 min before restarting the node. + ProcessBuilder pbClustStop = new ProcessBuilder(snappyPath + "/sbin/snappy-stop-all.sh"); + Long startTime = System.currentTimeMillis(); + snappyTest.executeProcess(pbClustStop, logFile); + Long totalTime = (System.currentTimeMillis() - startTime); + Log.getLogWriter().info("The cluster took " + totalTime + " ms to shut down"); + + //Restart the cluster after 10 mins + Thread.sleep(600000); + ProcessBuilder pbClustStart = new ProcessBuilder(snappyPath + "/sbin/snappy-start-all.sh"); + Long startTime1 = System.currentTimeMillis(); + snappyTest.executeProcess(pbClustStart, logFile); + Long totalTime1 = (System.currentTimeMillis() - startTime1); + Log.getLogWriter().info("The cluster took " + totalTime1 + " ms to shut down"); + } else { + if(nodeType.equalsIgnoreCase("servers")) + scriptName = "/sbin/snappy-servers.sh"; + else if(nodeType.equalsIgnoreCase("leads")) + scriptName = "/sbin/snappy-leads.sh"; + else + scriptName = "/sbin/snappy-locators.sh"; - ProcessBuilder pbStart = new ProcessBuilder(snappyPath + "/sbin/snappy-servers.sh", "start"); - 
snappyTest.executeProcess(pbStart, logFile); + orgName = new File(snappyPath + "/conf/" + nodeType); + bkName = new File(snappyPath + "/conf/" + nodeType + "_bk"); - Thread.sleep(60000); + String nodeInfoforHA = SnappyCDCPrms.getNodeInfoforHA(); - //delete the temp conf file created. - if (tempConfFile.delete()) { - System.out.println(tempConfFile.getName() + " is deleted!"); + //rename original conf file + if (orgName.renameTo(bkName)) { + Log.getLogWriter().info("File renamed to " + bkName); } else { - System.out.println("Delete operation is failed."); + Log.getLogWriter().info("Error"); } - //restore the back up to its originals. - if (bkName.renameTo(orgName)) { - Log.getLogWriter().info("File renamed to " + orgName); - } else { - Log.getLogWriter().info("Error"); + //write a new file in conf + try { + tempConfFile = new File(snappyPath + "/conf/" + nodeType); + FileWriter fw = new FileWriter(tempConfFile); + fw.write(nodeInfoforHA); + fw.close(); + File log = new File("."); + String dest = log.getCanonicalPath() + File.separator + "snappyServerSystem.log"; + Log.getLogWriter().info("The destination file is " + dest); + File logFile = new File(dest); + + Log.getLogWriter().info("The nodeType is " +nodeType+ " script to stop is " + scriptName); + ProcessBuilder pbStop = new ProcessBuilder(snappyPath + scriptName, "stop"); + snappyTest.executeProcess(pbStop, logFile); + + Thread.sleep(30000); //sleep for 3 min before restarting the node. + + Log.getLogWriter().info("The nodeType is " +nodeType+ " script to start is " + scriptName); + ProcessBuilder pbStart = new ProcessBuilder(snappyPath + scriptName, "start"); + snappyTest.executeProcess(pbStart, logFile); + + Thread.sleep(60000); + + //delete the temp conf file created. + if (tempConfFile.delete()) { + System.out.println(tempConfFile.getName() + " is deleted!"); + } else { + System.out.println("Delete operation is failed."); + } + + //restore the back up to its originals. 
+ if (bkName.renameTo(orgName)) { + Log.getLogWriter().info("File renamed to " + orgName); + } else { + Log.getLogWriter().info("Error"); + } + } catch (FileNotFoundException e) { + // File not found + e.printStackTrace(); + } catch (IOException e) { + // Error when writing to the file + e.printStackTrace(); } - } catch (FileNotFoundException e) { - // File not found - e.printStackTrace(); - } catch (IOException e) { - // Error when writing to the file - e.printStackTrace(); } } catch (Exception e) { diff --git a/dtests/src/test/java/io/snappydata/hydra/cdcConnector/snappyCustomizedHA.inc b/dtests/src/test/java/io/snappydata/hydra/cdcConnector/snappyCustomizedHA.inc index 20f2311b19..e6e40adab0 100644 --- a/dtests/src/test/java/io/snappydata/hydra/cdcConnector/snappyCustomizedHA.inc +++ b/dtests/src/test/java/io/snappydata/hydra/cdcConnector/snappyCustomizedHA.inc @@ -8,6 +8,16 @@ TASK taskClass = io.snappydata.hydra.cdcConnector.SnappyCDCTest taskMeth maxThreads = 1 maxTimesToRun = 1; +TASK taskClass = io.snappydata.hydra.cdcConnector.SnappyCDCTest taskMethod = performHA + io.snappydata.hydra.cdcConnector.SnappyCDCPrms-nodeInfoforHA="dev11 -dir=/nfs/users/spillai/tmgdata/locator" + io.snappydata.hydra.cdcConnector.SnappyCDCPrms-snappyFileLoc = "/export/dev11a/users/spillai/snappydata/build-artifacts/scala-2.11/snappy" + io.snappydata.hydra.cdcConnector.SnappyCDCPrms-nodeType = locator + startInterval = 7200 //after 2 hours + endInterval = 7200 + threadGroups = snappyHAThread + maxThreads = 1 + maxTimesToRun = 1; + TASK taskClass = io.snappydata.hydra.cdcConnector.SnappyCDCTest taskMethod = performHA io.snappydata.hydra.cdcConnector.SnappyCDCPrms-nodeInfoforHA="dev7 -locators=dev11:10334 -heap-size=10g -memory-size=80g -dir=/nfs/users/spillai/tmgdata/dev7 -spark.executor.cores=16 -J-XX:+HeapDumpOnOutOfMemoryError -critical-heap-percentage=95 -spark.local.dir=/nfs/users/spillai/tmp -hostname-for-clients=10.80.141.111" 
io.snappydata.hydra.cdcConnector.SnappyCDCPrms-snappyFileLoc = "/export/dev11a/users/spillai/snappydata/build-artifacts/scala-2.11/snappy" @@ -29,7 +39,7 @@ TASK taskClass = io.snappydata.hydra.cdcConnector.SnappyCDCTest taskMeth maxTimesToRun = 1; TASK taskClass = io.snappydata.hydra.cdcConnector.SnappyCDCTest taskMethod = performHA - io.snappydata.hydra.cdcConnector.SnappyCDCPrms-nodeInfoforHA="dev9 -locators=dev11:10334 -heap-size=10g -memory-size=80g -dir=/nfs/users/spillai/tmgdata/dev9-spark.executor.cores=16 -J-XX:+HeapDumpOnOutOfMemoryError -critical-heap-percentage=95 -spark.local.dir=/nfs/users/spillai/tmp -hostname-for-clients=10.80.141.111" + io.snappydata.hydra.cdcConnector.SnappyCDCPrms-nodeInfoforHA="dev9 -locators=dev11:10334 -heap-size=10g -memory-size=80g -dir=/nfs/users/spillai/tmgdata/dev9 -spark.executor.cores=16 -J-XX:+HeapDumpOnOutOfMemoryError -critical-heap-percentage=95 -spark.local.dir=/nfs/users/spillai/tmp -hostname-for-clients=10.80.141.111" io.snappydata.hydra.cdcConnector.SnappyCDCPrms-snappyFileLoc = "/export/dev11a/users/spillai/snappydata/build-artifacts/scala-2.11/snappy" io.snappydata.hydra.cdcConnector.SnappyCDCPrms-nodeType = servers startInterval = 36000 //after 10 hours @@ -46,4 +56,14 @@ TASK taskClass = io.snappydata.hydra.cdcConnector.SnappyCDCTest taskMeth endInterval = 50400 threadGroups = snappyHAThread maxThreads = 1 + maxTimesToRun = 1; + +TASK taskClass = io.snappydata.hydra.cdcConnector.SnappyCDCTest taskMethod = performHA + io.snappydata.hydra.cdcConnector.SnappyCDCPrms-nodeInfoforHA="dev11 -locators=dev11:10334 -heap-size=10g -memory-size=80g -dir=/nfs/users/spillai/tmgdata/dev11 -spark.executor.cores=16 -J-XX:+HeapDumpOnOutOfMemoryError -critical-heap-percentage=95 -spark.local.dir=/nfs/users/spillai/tmp -hostname-for-clients=10.80.141.111" + io.snappydata.hydra.cdcConnector.SnappyCDCPrms-snappyFileLoc = "/export/dev11a/users/spillai/snappydata/build-artifacts/scala-2.11/snappy" + 
io.snappydata.hydra.cdcConnector.SnappyCDCPrms-nodeType = allNodes + startInterval = 54000 //after 15 hours + endInterval = 54000 + threadGroups = snappyHAThread + maxThreads = 1 maxTimesToRun = 1; \ No newline at end of file From fe6ed061d039cd5194c3b33ef097a69b2bda8811 Mon Sep 17 00:00:00 2001 From: vivekwiz Date: Mon, 21 May 2018 17:36:25 +0530 Subject: [PATCH 2/4] Sync store --- store | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/store b/store index e678529042..28a4a9ae51 160000 --- a/store +++ b/store @@ -1 +1 @@ -Subproject commit e67852904278c4a64f7ebd17354ca26e88fae3e5 +Subproject commit 28a4a9ae5120e5698c26f97fde13098f6d14a587 From d187c483bcabe4a7b2f927b8f0f8394c121c834f Mon Sep 17 00:00:00 2001 From: ahshahid Date: Mon, 21 May 2018 16:59:59 -0700 Subject: [PATCH 3/4] Moved bug test from snappy/core to cluster so that the bug test runs with snappy/spark --- .../org/apache/spark/sql/store/BugTest.scala | 228 ++++++++++++++++++ .../spark/sql/store/ColumnTableTest.scala | 2 +- .../org/apache/spark/sql/store/ViewTest.scala | 226 ----------------- 3 files changed, 229 insertions(+), 227 deletions(-) diff --git a/cluster/src/test/scala/org/apache/spark/sql/store/BugTest.scala b/cluster/src/test/scala/org/apache/spark/sql/store/BugTest.scala index fe57672389..bb8ef6687b 100644 --- a/cluster/src/test/scala/org/apache/spark/sql/store/BugTest.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/store/BugTest.scala @@ -100,4 +100,232 @@ class BugTest extends SnappyFunSuite with BeforeAndAfterAll { assertEquals(1, rs.length) } } + + test("SNAP-2342 nested query involving joins & union throws Exception") { + val user1 = "gemfire1" + val session = snc.newSession() + session.snappySession.conf.set(Attribute.USERNAME_ATTR, user1) + session.snappySession.conf.set(Attribute.PASSWORD_ATTR, user1) + + session.sql(s"create table ujli ( " + + "aagmaterial string," + + "accountassignmentgroup string," + + "accounttype string," + + "allocationcycle 
string," + + "allocationsegment string," + + "asset string," + + "billingdocument string," + + "billingdocumentitem string," + + "bravoequitycode string," + + "bravominorcode string," + + "bsegdocumentlinenumber string," + + "businessplace string," + + "businesstransaction string," + + "controllingarea string," + + "copadocumentnumber string," + + "copaobjectnumber string," + + "costcenter string," + + "costelement string," + + "countryofshiptocustomer string," + + "createdby string," + + "creationtime string," + + "customer string," + + "customergroup string," + + "debitcreditindicator string," + + "distributionchannel string," + + "division string," + + "documentdate string," + + "documentheadertext string," + + "documentlinenumberinsourcesystem string," + + "documentnumberinsourcesystem string," + + "documenttype string," + + "edgcreateditemindoc string," + + "entrydate string," + + "errorstatus string," + + "fidocumentquantity string," + + "fiscalperiod string," + + "fiscalyear string," + + "fsid string," + + "functionalareacode string," + + "glaccountcode string," + + "hleamount string," + + "indexfromcopa string," + + "itemcategory string," + + "itemtext string," + + "kitmaterial string," + + "kittype string," + + "leamount string," + + "lebillingtype string," + + "lecode string," + + "lecurrencycode string," + + "lesalesqty string," + + "lesalesqtyuom string," + + "ledgercode string," + + "localcompanycode string," + + "localdocumenttype string," + + "localfiscalperiod string," + + "localfiscalyear string," + + "localfunctionalareacode string," + + "localglaccountcode string," + + "locallecurrencycode string," + + "localledgercode string," + + "localmrccode string," + + "localprofitcenter string," + + "localsku string," + + "localversioncode string," + + "mrccode string," + + "parentdocumentnumberinsourcesystem string," + + "partnercostcenter string," + + "partnerfunctionalarea string," + + "partnerprofitcenter string," + + "partnersegment string," + + 
"payer string," + + "pcadocnumber string," + + "pcaitemnumber string," + + "plant string," + + "postingdate string," + + "postingkey string," + + "producthierarchy string," + + "psegment string," + + "rclnt string," + + "reference string," + + "referencedocument string," + + "referencetransaction string," + + "regionofshiptocustomer string," + + "salesdoctype string," + + "salesgroup string," + + "salesoffice string," + + "salesorder string," + + "salesorderitem string," + + "salesorganization string," + + "sectorproductgroup string," + + "shipto string," + + "sleamount string," + + "sourcesystemid string," + + "tradingpartner string," + + "transactioncode string," + + "transactioncurrencyamount string," + + "transactioncurrencycode string," + + "transactiontype string," + + "ujlkey string," + + "valuefieldfromcopa string," + + "vendor string," + + "versioncode string )") + + session.sql ("create table ujs (" + + "uuid string," + + "bravoequitycode string," + + "controllingarea string," + + "costcenter string," + + "creationtime string," + + "debitcreditindicator string," + + "errstatus string," + + "fiscalyear string," + + "fsid string," + + "functionalareacode string," + + "glaccountcode string," + + "hleamount string," + + "leamount string," + + "lecode string," + + "lecostelement string," + + "lecurrencycode string," + + "leplant string," + + "ledgercode string," + + "localcompanycode string," + + "localfiscalyear string," + + "localfunctionalareacode string," + + "localglaccountcode string," + + "locallecurrencycode string," + + "localledgercode string," + + "localmrccode string," + + "localprofitcenter string," + + "localversioncode string," + + "mrccode string," + + "partnerfunctionalarea string," + + "partnerprofitcenter string," + + "partnersegment string," + + "referencetransaction string," + + "sleamount string," + + "sourceadditionalkey string," + + "sourcesystemid string," + + "tradingpartner string," + + "transactioncurrencyamount string," + + 
"transactioncurrencycode string," + + "transactiontype string," + + "versioncode string)") + + session.sql("create table gfs (" + + "gfs string, " + + " gfsdescription string, " + + " globalfunctionalarea string )") + + session.sql("create table bravo (" + + " bravo string," + + "bravodescription string," + + " gfs string, " + + " gfsdescription string)") + + session.sql("create table gtw (" + + "gfs string," + + "gfsdescription string," + + "gtw string," + + "gtwdescription string)") + + session.sql("create table coa (" + + "accounttype string," + + "errorcode string," + + "errormessage string," + + "errorstatus string," + + "gfs string," + + "gfsdescription string," + + "globalfunctionalarea string," + + "indicevalue string," + + "localfunctionalarea string," + + "localgl string," + + "localgldescription string)") + + session.sql(s"create or replace view TrialBalance as " + + s"( select leUniversal,gfs,first(gfsDescription) as gfsDescription, " + + s"first(bravo) as bravo, " + + s"first(bravoDescription) as bravoDescription, first(gtw) as gtw, " + + s"first(gtwDescription) as gtwDescription, " + + s"first(globalFunctionalArea) as globalFunctionalArea," + + s"format_number(sum(credit),2) as credit," + + s" format_number(sum(debit),2) as debit,format_number(sum(total),2) as total from" + + s" ( select a.leCode as leUniversal,a.localCompanyCode as leLocal," + + s" a.mrcCode as mrcUniversal," + + s" a.sourceSystemId as sourceSystem,a.glAccountCode as gfs," + + s"a.localGlAccountCode as localGl," + + s" SUM(hleAmount) as debit,SUM(sleAmount) as credit,SUM(leAmount) as total," + + s" first(b.gfsDescription) as gfsDescription," + + s" first(b.globalFunctionalArea) as globalFunctionalArea," + + s" first((case when a.sourceSystemId='project_one' then e.localGlDescription " + + s" when a.sourceSystemId='btb_latam' then b.gfsDescription else '' end)) " + + s" as localGlDescription ," + + s" first(c.bravoDescription) as bravoDescription," + + s"first(d.gtwDescription) as 
gtwDescription, " + + s" first(c.bravo) as bravo, first(d.gtw) as gtw from ( select ledgerCode,leCode," + + s" localCompanyCode,mrcCode,fiscalYear,sourceSystemId,localGlAccountCode," + + s" glAccountCode,last(localFunctionalAreaCode),SUM(leAmount) as leAmount," + + s" SUM(hleAmount) as hleAmount,SUM(sleAmount) as sleAmount, glAccountCode ," + + s" 'Local GL' as accountType,localGlAccountCode as localGl from " + + s" ( select ledgerCode,leCode,localCompanyCode,mrcCode,fiscalYear,sourceSystemId," + + s" localGlAccountCode,glAccountCode,localFunctionalAreaCode,leAmount,hleAmount,sleAmount" + + s" from ujli where ledgerCode='0L' and leCode='7600' " + + s" AND fiscalYear='2017' and fiscalPeriod<=3 AND sourceSystemId='btb_latam' union all" + + s" select ledgerCode,leCode,localCompanyCode,mrcCode,fiscalYear,sourceSystemId," + + s" localGlAccountCode,glAccountCode,localFunctionalAreaCode,leAmount,hleAmount," + + s" sleAmount from ujs where ledgerCode='0L' and leCode='7600'" + + s" AND fiscalYear='2017' AND sourceSystemId='btb_latam' ) group by ledgerCode," + + s" leCode,localCompanyCode,mrcCode,fiscalYear,sourceSystemId," + + s" localGlAccountCode,glAccountCode ) a" + + s" left join gfs b on (a.glAccountCode=b.gfs) left join " + + s" bravo c " + + s" on (a.glAccountCode=c.gfs) left join gtw d on (a.glAccountCode=d.gfs)" + + s" left join coa e on(a.accountType=e.accountType and " + + s" a.glAccountCode = e.gfs and a.localGl = e.localGl ) group by a.leCode," + + s"a.localCompanyCode," + + s" a.mrcCode,a.sourceSystemId,a.glAccountCode,a.localGlAccountCode," + + s"c.bravo,d.gtw) group by leUniversal,gfs)") + } } diff --git a/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala b/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala index 635661a7f2..8b47a276a5 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala @@ -155,7 +155,7 @@ class 
ColumnTableTest snc.sql(s"create or replace view $viewName as ($query)") query = s"SELECT sum(Col1) as summ FROM $tableName where col1 > .0001BD having summ > .001bD" - snc.sql(query)collect + snc.sql(query).collect snc.sql(s"create or replace view $viewName as ($query)") diff --git a/core/src/test/scala/org/apache/spark/sql/store/ViewTest.scala b/core/src/test/scala/org/apache/spark/sql/store/ViewTest.scala index 6987eaf784..38656c8100 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/ViewTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/ViewTest.scala @@ -300,230 +300,4 @@ class ViewTest extends SnappyFunSuite { session2.sql("drop view viewOnJoin") assert(session2.sessionCatalog.tableExists("viewOnJoin") === false) } - - - test("SNAP-2342 nested query involving joins & union throws Exception") { - val session = this.snc.snappySession - - session.sql(s"create table ujli ( " + - "aagmaterial string," + - "accountassignmentgroup string," + - "accounttype string," + - "allocationcycle string," + - "allocationsegment string," + - "asset string," + - "billingdocument string," + - "billingdocumentitem string," + - "bravoequitycode string," + - "bravominorcode string," + - "bsegdocumentlinenumber string," + - "businessplace string," + - "businesstransaction string," + - "controllingarea string," + - "copadocumentnumber string," + - "copaobjectnumber string," + - "costcenter string," + - "costelement string," + - "countryofshiptocustomer string," + - "createdby string," + - "creationtime string," + - "customer string," + - "customergroup string," + - "debitcreditindicator string," + - "distributionchannel string," + - "division string," + - "documentdate string," + - "documentheadertext string," + - "documentlinenumberinsourcesystem string," + - "documentnumberinsourcesystem string," + - "documenttype string," + - "edgcreateditemindoc string," + - "entrydate string," + - "errorstatus string," + - "fidocumentquantity string," + - "fiscalperiod 
string," + - "fiscalyear string," + - "fsid string," + - "functionalareacode string," + - "glaccountcode string," + - "hleamount string," + - "indexfromcopa string," + - "itemcategory string," + - "itemtext string," + - "kitmaterial string," + - "kittype string," + - "leamount string," + - "lebillingtype string," + - "lecode string," + - "lecurrencycode string," + - "lesalesqty string," + - "lesalesqtyuom string," + - "ledgercode string," + - "localcompanycode string," + - "localdocumenttype string," + - "localfiscalperiod string," + - "localfiscalyear string," + - "localfunctionalareacode string," + - "localglaccountcode string," + - "locallecurrencycode string," + - "localledgercode string," + - "localmrccode string," + - "localprofitcenter string," + - "localsku string," + - "localversioncode string," + - "mrccode string," + - "parentdocumentnumberinsourcesystem string," + - "partnercostcenter string," + - "partnerfunctionalarea string," + - "partnerprofitcenter string," + - "partnersegment string," + - "payer string," + - "pcadocnumber string," + - "pcaitemnumber string," + - "plant string," + - "postingdate string," + - "postingkey string," + - "producthierarchy string," + - "psegment string," + - "rclnt string," + - "reference string," + - "referencedocument string," + - "referencetransaction string," + - "regionofshiptocustomer string," + - "salesdoctype string," + - "salesgroup string," + - "salesoffice string," + - "salesorder string," + - "salesorderitem string," + - "salesorganization string," + - "sectorproductgroup string," + - "shipto string," + - "sleamount string," + - "sourcesystemid string," + - "tradingpartner string," + - "transactioncode string," + - "transactioncurrencyamount string," + - "transactioncurrencycode string," + - "transactiontype string," + - "ujlkey string," + - "valuefieldfromcopa string," + - "vendor string," + - "versioncode string )") - - session.sql ("create table ujs (" + - "uuid string," + - "bravoequitycode string," + - 
"controllingarea string," + - "costcenter string," + - "creationtime string," + - "debitcreditindicator string," + - "errstatus string," + - "fiscalyear string," + - "fsid string," + - "functionalareacode string," + - "glaccountcode string," + - "hleamount string," + - "leamount string," + - "lecode string," + - "lecostelement string," + - "lecurrencycode string," + - "leplant string," + - "ledgercode string," + - "localcompanycode string," + - "localfiscalyear string," + - "localfunctionalareacode string," + - "localglaccountcode string," + - "locallecurrencycode string," + - "localledgercode string," + - "localmrccode string," + - "localprofitcenter string," + - "localversioncode string," + - "mrccode string," + - "partnerfunctionalarea string," + - "partnerprofitcenter string," + - "partnersegment string," + - "referencetransaction string," + - "sleamount string," + - "sourceadditionalkey string," + - "sourcesystemid string," + - "tradingpartner string," + - "transactioncurrencyamount string," + - "transactioncurrencycode string," + - "transactiontype string," + - "versioncode string)") - - session.sql("create table gfs (" + - "gfs string, " + - " gfsdescription string, " + - " globalfunctionalarea string )") - - session.sql("create table bravo (" + - " bravo string," + - "bravodescription string," + - " gfs string, " + - " gfsdescription string)") - - session.sql("create table gtw (" + - "gfs string," + - "gfsdescription string," + - "gtw string," + - "gtwdescription string)") - - session.sql("create table coa (" + - "accounttype string," + - "errorcode string," + - "errormessage string," + - "errorstatus string," + - "gfs string," + - "gfsdescription string," + - "globalfunctionalarea string," + - "indicevalue string," + - "localfunctionalarea string," + - "localgl string," + - "localgldescription string)") - - session.sql(s"create or replace view TrialBalance as " + - s"( select leUniversal,gfs,first(gfsDescription) as gfsDescription, " + - s"first(bravo) as 
bravo, " + - s"first(bravoDescription) as bravoDescription, first(gtw) as gtw, " + - s"first(gtwDescription) as gtwDescription, " + - s"first(globalFunctionalArea) as globalFunctionalArea," + - s"format_number(sum(credit),2) as credit," + - s" format_number(sum(debit),2) as debit,format_number(sum(total),2) as total from" + - s" ( select a.leCode as leUniversal,a.localCompanyCode as leLocal," + - s" a.mrcCode as mrcUniversal," + - s" a.sourceSystemId as sourceSystem,a.glAccountCode as gfs," + - s"a.localGlAccountCode as localGl," + - s" SUM(hleAmount) as debit,SUM(sleAmount) as credit,SUM(leAmount) as total," + - s" first(b.gfsDescription) as gfsDescription," + - s" first(b.globalFunctionalArea) as globalFunctionalArea," + - s" first((case when a.sourceSystemId='project_one' then e.localGlDescription " + - s" when a.sourceSystemId='btb_latam' then b.gfsDescription else '' end)) " + - s" as localGlDescription ," + - s" first(c.bravoDescription) as bravoDescription," + - s"first(d.gtwDescription) as gtwDescription, " + - s" first(c.bravo) as bravo, first(d.gtw) as gtw from ( select ledgerCode,leCode," + - s" localCompanyCode,mrcCode,fiscalYear,sourceSystemId,localGlAccountCode," + - s" glAccountCode,last(localFunctionalAreaCode),SUM(leAmount) as leAmount," + - s" SUM(hleAmount) as hleAmount,SUM(sleAmount) as sleAmount, glAccountCode ," + - s" 'Local GL' as accountType,localGlAccountCode as localGl from " + - s" ( select ledgerCode,leCode,localCompanyCode,mrcCode,fiscalYear,sourceSystemId," + - s" localGlAccountCode,glAccountCode,localFunctionalAreaCode,leAmount,hleAmount,sleAmount" + - s" from ujli where ledgerCode='0L' and leCode='7600' " + - s" AND fiscalYear='2017' and fiscalPeriod<=3 AND sourceSystemId='btb_latam' union all" + - s" select ledgerCode,leCode,localCompanyCode,mrcCode,fiscalYear,sourceSystemId," + - s" localGlAccountCode,glAccountCode,localFunctionalAreaCode,leAmount,hleAmount," + - s" sleAmount from ujs where ledgerCode='0L' and leCode='7600'" + - 
s" AND fiscalYear='2017' AND sourceSystemId='btb_latam' ) group by ledgerCode," + - s" leCode,localCompanyCode,mrcCode,fiscalYear,sourceSystemId," + - s" localGlAccountCode,glAccountCode ) a" + - s" left join gfs b on (a.glAccountCode=b.gfs) left join " + - s" bravo c " + - s" on (a.glAccountCode=c.gfs) left join gtw d on (a.glAccountCode=d.gfs)" + - s" left join coa e on(a.accountType=e.accountType and " + - s" a.glAccountCode = e.gfs and a.localGl = e.localGl ) group by a.leCode," + - s"a.localCompanyCode," + - s" a.mrcCode,a.sourceSystemId,a.glAccountCode,a.localGlAccountCode," + - s"c.bravo,d.gtw) group by leUniversal,gfs)") - } } From 4576d5afe225afea000b8ab09b9d6bac5b113c1b Mon Sep 17 00:00:00 2001 From: Sumedh Wale Date: Fri, 25 May 2018 03:43:48 +0530 Subject: [PATCH 4/4] [SNAP-2356] release compressed buffers that are skipped (#1040) - added a releaseExecutionMemory in StoreUnifiedManager like acquireExecutionMemory - in case compression does not reduce size enough, decrement accounting for the compressed buffer for heap configuration - some cleanups to change UnsafeHolder.release calls with BufferAllocator.release - fixing occasional failure in SnappyStreamingSuite - exclude abrt-watch-log from suspected exceptions in hydra that runs on CentOS/RHEL by default having "Exception" in its command-line and gets flagged as suspect - add dependency of scalaTest on check for dtests - additional logging for beforeClass/afterClass invocation --- build.gradle | 2 +- .../spark/sql/SnappySQLQuerySuite.scala | 2 +- .../org/apache/spark/sql/store/BugTest.scala | 22 ++++------ .../spark/memory/StoreUnifiedManager.scala | 8 ++++ .../sql/execution/columnar/ColumnBatch.scala | 14 +++---- .../columnar/impl/ColumnFormatEntry.scala | 39 +++++++----------- .../impl/JDBCSourceAsColumnarStore.scala | 5 +-- .../spark/sql/store/CompressionUtils.scala | 7 ++-- .../spark/sql/store/ColumnTableTest.scala | 8 ++-- .../sql/streaming/SnappyStreamingSuite.scala | 1 + dtests/build.gradle | 
2 + .../hydra/SnappyHydraTestRunner.scala | 22 +++++----- .../test/dunit/DistributedTestBase.java | 41 +++++++++++++------ store | 2 +- 14 files changed, 91 insertions(+), 84 deletions(-) diff --git a/build.gradle b/build.gradle index 1c217cdd8d..4e474e4a05 100644 --- a/build.gradle +++ b/build.gradle @@ -1105,7 +1105,7 @@ task checkAll { dependsOn ":gemfire-connector:check" } if (!rootProject.hasProperty('smoke.skip')) { - dependsOn buildDtests, ":snappy-dtests_${scalaBinaryVersion}:scalaTest" + dependsOn buildDtests, ":snappy-dtests_${scalaBinaryVersion}:check" } mustRunAfter buildAll } diff --git a/cluster/src/test/scala/org/apache/spark/sql/SnappySQLQuerySuite.scala b/cluster/src/test/scala/org/apache/spark/sql/SnappySQLQuerySuite.scala index 5d3ac507da..7dbea625ce 100644 --- a/cluster/src/test/scala/org/apache/spark/sql/SnappySQLQuerySuite.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/SnappySQLQuerySuite.scala @@ -253,7 +253,7 @@ class SnappySQLQuerySuite extends SnappyFunSuite { "(exists (select col1 from r2 where r2.col1=r1.col1) " + "or exists(select col1 from r3 where r3.col1=r1.col1))") - val result = df.collect() + df.collect() checkAnswer(df, Seq(Row(1, "1", "1", 100), Row(2, "2", "2", 2), Row(4, "4", "4", 4) )) } diff --git a/cluster/src/test/scala/org/apache/spark/sql/store/BugTest.scala b/cluster/src/test/scala/org/apache/spark/sql/store/BugTest.scala index bb8ef6687b..00b702a269 100644 --- a/cluster/src/test/scala/org/apache/spark/sql/store/BugTest.scala +++ b/cluster/src/test/scala/org/apache/spark/sql/store/BugTest.scala @@ -16,14 +16,11 @@ */ package org.apache.spark.sql.store -import java.util.Properties - import com.pivotal.gemfirexd.Attribute import com.pivotal.gemfirexd.security.{LdapTestServer, SecurityTestUtils} -import io.snappydata.util.TestUtils -import io.snappydata.{Constant, PlanTest, SnappyFunSuite} +import io.snappydata.{Constant, SnappyFunSuite} +import org.junit.Assert.assertEquals import org.scalatest.BeforeAndAfterAll 
-import org.junit.Assert.{assertEquals, assertFalse, assertTrue} import org.apache.spark.SparkConf @@ -33,11 +30,11 @@ class BugTest extends SnappyFunSuite with BeforeAndAfterAll { override def beforeAll(): Unit = { this.stopAll() } - + protected override def newSparkConf(addOn: (SparkConf) => SparkConf): SparkConf = { val ldapProperties = SecurityTestUtils.startLdapServerAndGetBootProperties(0, 0, sysUser, getClass.getResource("/auth.ldif").getPath) - import com.pivotal.gemfirexd.Property.{AUTH_LDAP_SERVER, AUTH_LDAP_SEARCH_BASE} + import com.pivotal.gemfirexd.Property.{AUTH_LDAP_SEARCH_BASE, AUTH_LDAP_SERVER} for (k <- List(Attribute.AUTH_PROVIDER, AUTH_LDAP_SERVER, AUTH_LDAP_SEARCH_BASE)) { System.setProperty(k, ldapProperties.getProperty(k)) } @@ -63,11 +60,11 @@ class BugTest extends SnappyFunSuite with BeforeAndAfterAll { if (ldapServer.isServerStarted) { ldapServer.stopService() } - import com.pivotal.gemfirexd.Property.{AUTH_LDAP_SERVER, AUTH_LDAP_SEARCH_BASE} + import com.pivotal.gemfirexd.Property.{AUTH_LDAP_SEARCH_BASE, AUTH_LDAP_SERVER} for (k <- List(Attribute.AUTH_PROVIDER, AUTH_LDAP_SERVER, AUTH_LDAP_SEARCH_BASE)) { System.clearProperty(k) System.clearProperty("gemfirexd." 
+ k) - System.clearProperty(Constant.STORE_PROPERTY_PREFIX + k) + System.clearProperty(Constant.STORE_PROPERTY_PREFIX + k) } System.clearProperty(Constant.STORE_PROPERTY_PREFIX + Attribute.USERNAME_ATTR) System.clearProperty(Constant.STORE_PROPERTY_PREFIX + Attribute.PASSWORD_ATTR) @@ -89,8 +86,7 @@ class BugTest extends SnappyFunSuite with BeforeAndAfterAll { // TODO : Use the actual connection pool limit val limit = 500 - - for (i <- 1 to limit) { + for (_ <- 1 to limit) { val snc2 = snc.newSession() snc2.snappySession.conf.set(Attribute.USERNAME_ATTR, user2) snc2.snappySession.conf.set(Attribute.PASSWORD_ATTR, user2) @@ -106,7 +102,7 @@ class BugTest extends SnappyFunSuite with BeforeAndAfterAll { val session = snc.newSession() session.snappySession.conf.set(Attribute.USERNAME_ATTR, user1) session.snappySession.conf.set(Attribute.PASSWORD_ATTR, user1) - + session.sql(s"create table ujli ( " + "aagmaterial string," + "accountassignmentgroup string," + @@ -212,7 +208,7 @@ class BugTest extends SnappyFunSuite with BeforeAndAfterAll { "vendor string," + "versioncode string )") - session.sql ("create table ujs (" + + session.sql("create table ujs (" + "uuid string," + "bravoequitycode string," + "controllingarea string," + diff --git a/core/src/main/scala/org/apache/spark/memory/StoreUnifiedManager.scala b/core/src/main/scala/org/apache/spark/memory/StoreUnifiedManager.scala index f740c5425f..101ad42375 100644 --- a/core/src/main/scala/org/apache/spark/memory/StoreUnifiedManager.scala +++ b/core/src/main/scala/org/apache/spark/memory/StoreUnifiedManager.scala @@ -209,6 +209,14 @@ object MemoryManagerCallback extends Logging { } allocator.allocate(size, owner).order(ByteOrder.LITTLE_ENDIAN) } + + /** release and accounting for byte buffer allocated by [[allocateExecutionMemory]] */ + def releaseExecutionMemory(buffer: ByteBuffer, owner: String, releaseBuffer: Boolean): Unit = { + if (releaseBuffer) BufferAllocator.releaseBuffer(buffer) + if (buffer.hasArray) { + 
StoreCallbacksImpl.releaseStorageMemory(owner, buffer.capacity(), offHeap = false) + } + } } final class DefaultMemoryConsumer(taskMemoryManager: TaskMemoryManager, diff --git a/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBatch.scala b/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBatch.scala index 954d2dc049..2aa5d04349 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBatch.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBatch.scala @@ -26,14 +26,14 @@ import scala.util.control.NonFatal import com.gemstone.gemfire.cache.EntryDestroyedException import com.gemstone.gemfire.internal.cache.{BucketRegion, GemFireCacheImpl, LocalRegion, NonLocalRegionEntry, PartitionedRegion, RegionEntry, TXStateInterface} -import com.gemstone.gemfire.internal.shared.FetchRequest -import com.gemstone.gemfire.internal.shared.unsafe.UnsafeHolder +import com.gemstone.gemfire.internal.shared.{BufferAllocator, FetchRequest} import com.koloboke.function.IntObjPredicate import com.pivotal.gemfirexd.internal.engine.store.GemFireContainer import com.pivotal.gemfirexd.internal.impl.jdbc.EmbedConnection import io.snappydata.collection.IntObjectHashMap import io.snappydata.thrift.common.BufferedBlob +import org.apache.spark.memory.MemoryManagerCallback import org.apache.spark.sql.execution.columnar.encoding.{ColumnDecoder, ColumnDeleteDecoder, ColumnEncoding, UpdatedColumnDecoder, UpdatedColumnDecoderBase} import org.apache.spark.sql.execution.columnar.impl._ import org.apache.spark.sql.execution.row.PRValuesIterator @@ -333,7 +333,7 @@ final class ColumnBatchIteratorOnRS(conn: Connection, val result = CompressionUtils.codecDecompressIfRequired( buffer.order(ByteOrder.LITTLE_ENDIAN), allocator) if (result ne buffer) { - UnsafeHolder.releaseIfDirectBuffer(buffer) + BufferAllocator.releaseBuffer(buffer) // decompressed buffer will be ordered by LITTLE_ENDIAN while non-decompressed // is returned with 
BIG_ENDIAN in order to distinguish the two cases result @@ -404,11 +404,11 @@ final class ColumnBatchIteratorOnRS(conn: Connection, override def test(col: Int, buffer: ByteBuffer): Boolean = { // release previous set of buffers immediately if (buffer ne null) { - if (buffer.isDirect) UnsafeHolder.releaseDirectBuffer(buffer) // release from accounting if decompressed buffer - else if (buffer.order() eq ByteOrder.LITTLE_ENDIAN) { - StoreCallbacksImpl.releaseStorageMemory(CompressionUtils.DECOMPRESSION_OWNER, - buffer.capacity(), offHeap = false) + if (!BufferAllocator.releaseBuffer(buffer) && + (buffer.order() eq ByteOrder.LITTLE_ENDIAN)) { + MemoryManagerCallback.releaseExecutionMemory(buffer, + CompressionUtils.DECOMPRESSION_OWNER, releaseBuffer = false) } } true diff --git a/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/ColumnFormatEntry.scala b/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/ColumnFormatEntry.scala index b7a1dd6feb..4d863522cd 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/ColumnFormatEntry.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/ColumnFormatEntry.scala @@ -30,7 +30,7 @@ import com.gemstone.gemfire.internal.cache.partitioned.PREntriesIterator import com.gemstone.gemfire.internal.cache.persistence.DiskRegionView import com.gemstone.gemfire.internal.cache.store.SerializedDiskBuffer import com.gemstone.gemfire.internal.shared._ -import com.gemstone.gemfire.internal.shared.unsafe.{DirectBufferAllocator, UnsafeHolder} +import com.gemstone.gemfire.internal.shared.unsafe.DirectBufferAllocator import com.gemstone.gemfire.internal.size.ReflectionSingleObjectSizer.REFERENCE_SIZE import com.gemstone.gemfire.internal.{ByteBufferDataInput, DSCODE, DSFIDFactory, DataSerializableFixedID, HeapDataOutputStream} import com.pivotal.gemfirexd.internal.engine.distributed.utils.GemFireXDUtils @@ -461,14 +461,13 @@ class ColumnFormatValue extends SerializedDiskBuffer 
val perfStats = getCachePerfStats(context) val startDecompression = perfStats.startDecompression() val decompressed = CompressionUtils.codecDecompress(buffer, allocator, position, -typeId) - val isManagedDirect = allocator.isManagedDirect try { // update decompression stats perfStats.endDecompression(startDecompression) val newValue = copy(decompressed, isCompressed = false, changeOwnerToStorage = false) if (!isDirect || this.refCount <= 2) { val updateStats = (context ne null) && !fromDisk - if (updateStats && !isManagedDirect) { + if (updateStats && !isDirect) { // acquire the increased memory after decompression val numBytes = decompressed.capacity() - buffer.capacity() if (!StoreCallbacksImpl.acquireStorageMemory(context.getFullPath, @@ -483,9 +482,7 @@ class ColumnFormatValue extends SerializedDiskBuffer } this.columnBuffer = newBuffer this.decompressionState = 1 - if (isDirect) { - UnsafeHolder.releaseDirectBuffer(buffer) - } + allocator.release(buffer) perfStats.incDecompressedReplaced() this } else { @@ -493,12 +490,10 @@ class ColumnFormatValue extends SerializedDiskBuffer newValue } } finally { - if (!isManagedDirect) { - // release the memory acquired for decompression - // (any on-the-fly returned buffer will be part of runtime overhead) - StoreCallbacksImpl.releaseStorageMemory(CompressionUtils.DECOMPRESSION_OWNER, - decompressed.capacity(), offHeap = false) - } + // release the memory acquired for decompression + // (any on-the-fly returned buffer will be part of runtime overhead) + MemoryManagerCallback.releaseExecutionMemory(decompressed, + CompressionUtils.DECOMPRESSION_OWNER, releaseBuffer = false) } } } @@ -523,7 +518,6 @@ class ColumnFormatValue extends SerializedDiskBuffer val compressed = CompressionUtils.codecCompress(compressionCodecId, buffer, bufferLen, allocator) if (compressed ne buffer) { - val isManagedDirect = allocator.isManagedDirect try { // update compression stats perfStats.endCompression(startCompression, bufferLen, 
compressed.limit()) @@ -534,9 +528,7 @@ class ColumnFormatValue extends SerializedDiskBuffer val newBuffer = if (compressed.capacity() >= size + 32) { val trimmed = allocator.allocateForStorage(size).order(ByteOrder.LITTLE_ENDIAN) trimmed.put(compressed) - if (isDirect) { - UnsafeHolder.releaseDirectBuffer(compressed) - } + allocator.release(compressed) trimmed.rewind() trimmed } else transferToStorage(compressed, allocator) @@ -547,11 +539,10 @@ class ColumnFormatValue extends SerializedDiskBuffer } this.columnBuffer = newBuffer this.decompressionState = 0 - if (isDirect) { - UnsafeHolder.releaseDirectBuffer(buffer) - } - // release storage memory - if (updateStats && !isManagedDirect) { + allocator.release(buffer) + // release storage memory for the buffer being transferred to storage + // (memory for compressed buffer will be marked released in any case) + if (updateStats && !isDirect) { StoreCallbacksImpl.releaseStorageMemory(context.getFullPath, buffer.capacity() - newBuffer.capacity(), offHeap = false) } @@ -566,10 +557,8 @@ class ColumnFormatValue extends SerializedDiskBuffer } finally { // release the memory acquired for compression // (any on-the-fly returned buffer will be part of runtime overhead) - if (!isManagedDirect) { - StoreCallbacksImpl.releaseStorageMemory(CompressionUtils.COMPRESSION_OWNER, - compressed.capacity(), offHeap = false) - } + MemoryManagerCallback.releaseExecutionMemory(compressed, + CompressionUtils.COMPRESSION_OWNER, releaseBuffer = false) } } else { // update skipped compression stats diff --git a/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/JDBCSourceAsColumnarStore.scala b/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/JDBCSourceAsColumnarStore.scala index 93be1a9b04..29d726bece 100644 --- a/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/JDBCSourceAsColumnarStore.scala +++ b/core/src/main/scala/org/apache/spark/sql/execution/columnar/impl/JDBCSourceAsColumnarStore.scala @@ 
-29,8 +29,7 @@ import com.esotericsoftware.kryo.io.{Input, Output} import com.esotericsoftware.kryo.{Kryo, KryoSerializable} import com.gemstone.gemfire.cache.IsolationLevel import com.gemstone.gemfire.internal.cache.{BucketRegion, CachePerfStats, GemFireCacheImpl, LocalRegion, PartitionedRegion, TXManagerImpl} -import com.gemstone.gemfire.internal.shared.SystemProperties -import com.gemstone.gemfire.internal.shared.unsafe.UnsafeHolder +import com.gemstone.gemfire.internal.shared.{BufferAllocator, SystemProperties} import com.pivotal.gemfirexd.internal.engine.Misc import com.pivotal.gemfirexd.internal.engine.ddl.catalog.GfxdSystemProcedures import com.pivotal.gemfirexd.internal.iapi.services.context.ContextService @@ -599,7 +598,7 @@ class JDBCSourceAsColumnarStore(private var _connProperties: ConnectionPropertie iter.next() } // release the batch buffers - batch.buffers.foreach(UnsafeHolder.releaseIfDirectBuffer) + batch.buffers.foreach(b => if (b ne null) BufferAllocator.releaseBuffer(b)) } } diff --git a/core/src/main/scala/org/apache/spark/sql/store/CompressionUtils.scala b/core/src/main/scala/org/apache/spark/sql/store/CompressionUtils.scala index 2081bfc400..6488fa92b9 100644 --- a/core/src/main/scala/org/apache/spark/sql/store/CompressionUtils.scala +++ b/core/src/main/scala/org/apache/spark/sql/store/CompressionUtils.scala @@ -19,7 +19,6 @@ package org.apache.spark.sql.store import java.nio.{ByteBuffer, ByteOrder} -import com.gemstone.gemfire.internal.shared.unsafe.UnsafeHolder import com.gemstone.gemfire.internal.shared.{BufferAllocator, HeapBufferAllocator, SystemProperties} import com.ning.compress.lzf.{LZFDecoder, LZFEncoder} import io.snappydata.Constant @@ -27,7 +26,7 @@ import net.jpountz.lz4.LZ4Factory import org.xerial.snappy.Snappy import org.apache.spark.io.{CompressionCodec, LZ4CompressionCodec, LZFCompressionCodec, SnappyCompressionCodec} -import org.apache.spark.memory.MemoryManagerCallback.allocateExecutionMemory +import 
org.apache.spark.memory.MemoryManagerCallback.{allocateExecutionMemory, releaseExecutionMemory} /** * Utility methods for compression/decompression. @@ -92,8 +91,8 @@ object CompressionUtils { result.limit(resultLen + COMPRESSION_HEADER_SIZE) result } else { - // release the compressed buffer if required - UnsafeHolder.releaseIfDirectBuffer(result) + // release the compressed buffer + releaseExecutionMemory(result, COMPRESSION_OWNER, releaseBuffer = true) input } } diff --git a/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala b/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala index 8b47a276a5..d397af8a22 100644 --- a/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala +++ b/core/src/test/scala/org/apache/spark/sql/store/ColumnTableTest.scala @@ -150,22 +150,22 @@ class ColumnTableTest dataDF.write.insertInto(tableName) var query = s"SELECT sum(Col1) as summ FROM $tableName where col1 > .0001 having summ > .001" - snc.sql(query).collect + snc.sql(query).collect() snc.sql(s"create or replace view $viewName as ($query)") query = s"SELECT sum(Col1) as summ FROM $tableName where col1 > .0001BD having summ > .001bD" - snc.sql(query).collect + snc.sql(query).collect() snc.sql(s"create or replace view $viewName as ($query)") query = s"SELECT sum(Col1) as summ FROM $tableName having summ > .001f" - snc.sql(query).collect + snc.sql(query).collect() snc.sql(s"create or replace view $viewName as ($query)") query = s"SELECT sum(Col1) as summ FROM $tableName having summ > .001d" - snc.sql(query).collect + snc.sql(query).collect() snc.sql(s"create or replace view $viewName as ($query)") diff --git a/core/src/test/scala/org/apache/spark/sql/streaming/SnappyStreamingSuite.scala b/core/src/test/scala/org/apache/spark/sql/streaming/SnappyStreamingSuite.scala index 94e954e1c6..be29d64412 100644 --- a/core/src/test/scala/org/apache/spark/sql/streaming/SnappyStreamingSuite.scala +++ 
b/core/src/test/scala/org/apache/spark/sql/streaming/SnappyStreamingSuite.scala @@ -240,6 +240,7 @@ class SnappyStreamingSuite kafkaUtils.createTopic(topic) val add = kafkaUtils.brokerAddress + ssnc.sql("drop table if exists directKafkaStream") ssnc.sql("create stream table directKafkaStream (" + " publisher string, advertiser string)" + " using directkafka_stream options(" + diff --git a/dtests/build.gradle b/dtests/build.gradle index f243f35dd9..7a53c6e555 100644 --- a/dtests/build.gradle +++ b/dtests/build.gradle @@ -156,3 +156,5 @@ testClasses.doLast { } } } + +check.dependsOn test, scalaTest diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyHydraTestRunner.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyHydraTestRunner.scala index 058ba5f2c3..eb52fd3617 100644 --- a/dtests/src/test/scala/io/snappydata/hydra/SnappyHydraTestRunner.scala +++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyHydraTestRunner.scala @@ -23,8 +23,8 @@ import io.snappydata.SnappyTestRunner import scala.sys.process._ /** - * Class extending can mix match methods like searchExceptions - */ + * Class extending can mix match methods like searchExceptions + */ class SnappyHydraTestRunner extends SnappyTestRunner { var SNAPPYDATA_SOURCE_DIR = "" @@ -52,19 +52,19 @@ class SnappyHydraTestRunner extends SnappyTestRunner { val c14 = "grep -v java.lang.reflect.InvocationTargetException" val c15 = "grep -v org.apache.spark.storage.ShuffleBlockFetcherIterator." 
+ "throwFetchFailedException" - /*val c16 = "grep -v org.apache.spark.SparkException:[[:space:]]*Exception[[:space:]]*thrown" + - "[[:space:]]*in[[:space:]]*awaitResult"*/ val c16 = Seq("grep", "-v", "org.apache.spark.SparkException: Exception thrown in awaitResult") - /*val c17 = "grep \'status:[[:space:]]*stopping\'[[:space:]]*-e[[:space:]]*\'java.lang" + - ".IllegalStateException\'"*/ + /* val c17 = "grep \'status:[[:space:]]*stopping\'[[:space:]]*-e[[:space:]]*\'java.lang" + + ".IllegalStateException\'" */ val c18 = "grep -v com.gemstone.gemfire.distributed.LockServiceDestroyedException" - /*val c19 = "grep GemFireIOException:[[:space:]]*Current[[:space:]]*operations[[:space:]]*did" + + /* + val c19 = "grep GemFireIOException:[[:space:]]*Current[[:space:]]*operations[[:space:]]*did" + "[[:space:]]*not[[:space:]]*distribute[[:space:]]*within" val c20 = "grep SparkException:[[:space:]]*External[[:space:]]*scheduler[[:space:]]*cannot" + - "[[:space:]]*be[[:space:]]*instantiated"*/ + "[[:space:]]*be[[:space:]]*instantiated" */ val c21 = Seq("grep", "-v", "Failed to retrieve information for") + val c22 = "grep -v abrt-watch-log" val command1 = c1 #| c2 #| c3 #| c4 #| c5 #| c6 #| c7 #| c8 #| c12 #| c13 #| c14 #| c15 #| - c16 #| /*c17 #|*/ c18 #| /*c19 #| c20 #|*/ c21 + c16 #| /* c17 #| */ c18 #| /* c19 #| c20 #| */ c21 #| c22 // TODO : handle case where the logDir path is incorrect or doesn't exists try { val output1: String = command1.!! 
@@ -73,7 +73,7 @@ class SnappyHydraTestRunner extends SnappyTestRunner { } catch { case r: java.lang.RuntimeException => - if (r.getMessage().contains("Nonzero exit value: 1")) { + if (r.getMessage.contains("Nonzero exit value: 1")) { // scalastyle:off println println("No unexpected Exceptions observed during smoke bt run.") } @@ -83,6 +83,4 @@ class SnappyHydraTestRunner extends SnappyTestRunner { case i: Throwable => throw i } } - } - diff --git a/dunit/src/main/java/io/snappydata/test/dunit/DistributedTestBase.java b/dunit/src/main/java/io/snappydata/test/dunit/DistributedTestBase.java index 70d191280c..ec5052d3af 100755 --- a/dunit/src/main/java/io/snappydata/test/dunit/DistributedTestBase.java +++ b/dunit/src/main/java/io/snappydata/test/dunit/DistributedTestBase.java @@ -695,15 +695,21 @@ public void setUp() throws Exception { logTestHistory(); testName = getName(); + Class thisClass = getClass(); + Logger logger = getLogWriter(); if (!beforeClassDone) { + lastTest = null; + logger.info("[setup] Invoking beforeClass for " + + thisClass.getSimpleName() + "." + testName + "\n"); beforeClass(); beforeClassDone = true; - lastTest = null; + System.out.println("\n[setup] Invoked beforeClass for " + + thisClass.getSimpleName() + "." 
+ testName + "\n"); } if (lastTest == null) { // for class-level afterClass, list the test methods and do the // afterClass in the tearDown of last method - Class scanClass = getClass(); + Class scanClass = thisClass; while (Test.class.isAssignableFrom(scanClass)) { for (Method m : MethodSorter.getDeclaredMethods(scanClass)) { String methodName = m.getName(); @@ -716,16 +722,15 @@ public void setUp() throws Exception { scanClass = scanClass.getSuperclass(); } if (lastTest == null) { - fail("Could not find any last test in " + getClass().getName()); + fail("Could not find any last test in " + thisClass.getName()); } else { - getLogWriter().info( - "Last test for " + getClass().getName() + ": " + lastTest); + logger.info("[setup] Last test for " + thisClass.getName() + + ": " + lastTest); } } if (testName != null) { String baseDefaultDiskStoreName = getTestClass().getCanonicalName() + "." + getTestName(); - final String className = getClass().getName(); for (int h = 0; h < Host.getHostCount(); h++) { Host host = Host.getHost(h); for (int v = 0; v < host.getVMCount(); v++) { @@ -736,7 +741,8 @@ public void setUp() throws Exception { } } } - System.out.println("\n\n[setup] START TEST " + getClass().getSimpleName() + "." + testName + "\n\n"); + System.out.println("\n\n[setup] START TEST " + thisClass.getSimpleName() + + "." + testName + "\n\n"); } /** @@ -780,12 +786,21 @@ public final void tearDown() throws Exception { } } } finally { - tearDownAfter(); - - if (getName().equals(lastTest)) { - afterClass(); - beforeClassDone = false; - lastTest = null; + final boolean isLastTest = getName().equals(lastTest); + try { + tearDownAfter(); + if (isLastTest) { + System.out.println("\n[tearDown] Invoking afterClass post " + + getClass().getSimpleName() + "." + testName + "\n"); + afterClass(); + System.out.println("\n[tearDown] Invoked afterClass post " + + getClass().getSimpleName() + "." 
+ testName + "\n"); + } + } finally { + if (isLastTest) { + beforeClassDone = false; + lastTest = null; + } } } } diff --git a/store b/store index 28a4a9ae51..e806f70d70 160000 --- a/store +++ b/store @@ -1 +1 @@ -Subproject commit 28a4a9ae5120e5698c26f97fde13098f6d14a587 +Subproject commit e806f70d704facd1d09533dc54644b19d73ea54f