diff --git a/UserConfig.json b/UserConfig.json
index 316c22938..4754c4fd9 100644
--- a/UserConfig.json
+++ b/UserConfig.json
@@ -24,6 +24,9 @@
     "explain_vectorized": false,
     "explain_obj_ref_mgnt": false,
     "explain_mlir_codegen": false,
+    "explain_mlir_codegen_sparsity_exploiting_op_fusion": false,
+    "explain_mlir_codegen_daphneir_to_mlir": false,
+    "explain_mlir_codegen_mlir_specific": false,
     "taskPartitioningScheme": "STATIC",
     "numberOfThreads": -1,
     "minimumTaskSize": 1,
diff --git a/src/api/cli/DaphneUserConfig.h b/src/api/cli/DaphneUserConfig.h
index 13bfdf4b1..8409d7546 100644
--- a/src/api/cli/DaphneUserConfig.h
+++ b/src/api/cli/DaphneUserConfig.h
@@ -74,6 +74,9 @@ struct DaphneUserConfig {
     bool explain_vectorized = false;
     bool explain_obj_ref_mgnt = false;
     bool explain_mlir_codegen = false;
+    bool explain_mlir_codegen_sparsity_exploiting_op_fusion = false;
+    bool explain_mlir_codegen_daphneir_to_mlir = false;
+    bool explain_mlir_codegen_mlir_specific = false;
     bool statistics = false;
 
     bool force_cuda = false;
diff --git a/src/api/internal/daphne_internal.cpp b/src/api/internal/daphne_internal.cpp
index b63be6f0b..e5c8fb0c0 100644
--- a/src/api/internal/daphne_internal.cpp
+++ b/src/api/internal/daphne_internal.cpp
@@ -287,26 +287,35 @@ int startDAPHNE(int argc, const char **argv, DaphneLibResult *daphneLibRes, int
         type_adaptation,
         vectorized,
         obj_ref_mgnt,
-        mlir_codegen
+        mlir_codegen,
+        mlir_codegen_sparsity_exploiting_op_fusion,
+        mlir_codegen_daphneir_to_mlir,
+        mlir_codegen_mlir_specific
     };
 
     static llvm::cl::list<ExplainArgs> explainArgList(
        "explain", cat(daphneOptions),
        llvm::cl::desc("Show DaphneIR after certain compiler passes (separate "
                       "multiple values by comma, the order is irrelevant)"),
-       llvm::cl::values(clEnumVal(parsing, "Show DaphneIR after parsing"),
-                        clEnumVal(parsing_simplified, "Show DaphneIR after parsing and some simplifications"),
-                        clEnumVal(sql, "Show DaphneIR after SQL parsing"),
-                        clEnumVal(property_inference, "Show DaphneIR after property inference"),
-                        clEnumVal(select_matrix_repr, "Show DaphneIR after selecting "
-                                                      "physical matrix representations"),
-                        clEnumVal(phy_op_selection, "Show DaphneIR after selecting physical operators"),
-                        clEnumVal(type_adaptation, "Show DaphneIR after adapting types to available kernels"),
-                        clEnumVal(vectorized, "Show DaphneIR after vectorization"),
-                        clEnumVal(obj_ref_mgnt, "Show DaphneIR after managing object references"),
-                        clEnumVal(kernels, "Show DaphneIR after kernel lowering"),
-                        clEnumVal(llvm, "Show DaphneIR after llvm lowering"),
-                        clEnumVal(mlir_codegen, "Show DaphneIR after MLIR codegen")),
+       llvm::cl::values(
+           clEnumVal(parsing, "Show DaphneIR after parsing"),
+           clEnumVal(parsing_simplified, "Show DaphneIR after parsing and some simplifications"),
+           clEnumVal(sql, "Show DaphneIR after SQL parsing"),
+           clEnumVal(property_inference, "Show DaphneIR after property inference"),
+           clEnumVal(select_matrix_repr, "Show DaphneIR after selecting "
+                                         "physical matrix representations"),
+           clEnumVal(phy_op_selection, "Show DaphneIR after selecting physical operators"),
+           clEnumVal(type_adaptation, "Show DaphneIR after adapting types to available kernels"),
+           clEnumVal(vectorized, "Show DaphneIR after vectorization"),
+           clEnumVal(obj_ref_mgnt, "Show DaphneIR after managing object references"),
+           clEnumVal(kernels, "Show DaphneIR after kernel lowering"),
+           clEnumVal(llvm, "Show DaphneIR after llvm lowering"),
+           clEnumVal(mlir_codegen, "Show DaphneIR after MLIR codegen"),
+           clEnumVal(mlir_codegen_sparsity_exploiting_op_fusion,
+                     "Show DaphneIR after MLIR codegen (sparsity-exploiting operator fusion)"),
+           clEnumVal(mlir_codegen_daphneir_to_mlir, "Show DaphneIR after MLIR codegen (DaphneIR to MLIR)"),
+           clEnumVal(mlir_codegen_mlir_specific, "Show DaphneIR after MLIR codegen (MLIR-specific)")),
        CommaSeparated);
 
    static llvm::cl::list<string> scriptArgs1("args", cat(daphneOptions),
@@ -479,6 +488,15 @@ int startDAPHNE(int argc, const char **argv, DaphneLibResult *daphneLibRes, int
         case mlir_codegen:
             user_config.explain_mlir_codegen = true;
             break;
+        case mlir_codegen_sparsity_exploiting_op_fusion:
+            user_config.explain_mlir_codegen_sparsity_exploiting_op_fusion = true;
+            break;
+        case mlir_codegen_daphneir_to_mlir:
+            user_config.explain_mlir_codegen_daphneir_to_mlir = true;
+            break;
+        case mlir_codegen_mlir_specific:
+            user_config.explain_mlir_codegen_mlir_specific = true;
+            break;
         }
     }
diff --git a/src/compiler/execution/DaphneIrExecutor.cpp b/src/compiler/execution/DaphneIrExecutor.cpp
index 651c4937a..0e5c0c4fd 100644
--- a/src/compiler/execution/DaphneIrExecutor.cpp
+++ b/src/compiler/execution/DaphneIrExecutor.cpp
@@ -263,6 +263,8 @@ void DaphneIrExecutor::buildCodegenPipeline(mlir::PassManager &pm) {
     // SparseExploit fuses multiple operations which only need to be lowered if still needed elsewhere.
     // Todo: if possible, run only if SparseExploitLowering was successful.
     pm.addPass(mlir::createCanonicalizerPass());
+    if (userConfig_.explain_mlir_codegen_sparsity_exploiting_op_fusion)
+        pm.addPass(mlir::daphne::createPrintIRPass("IR after MLIR codegen (sparsity-exploiting operator fusion):"));
 
     pm.addPass(mlir::daphne::createEwOpLoweringPass());
     pm.addPass(mlir::daphne::createAggAllOpLoweringPass());
@@ -283,6 +285,9 @@ void DaphneIrExecutor::buildCodegenPipeline(mlir::PassManager &pm) {
         pm.addPass(mlir::daphne::createPrintIRPass("IR directly after lowering MatMulOp."));
     }
 
+    if (userConfig_.explain_mlir_codegen_daphneir_to_mlir)
+        pm.addPass(mlir::daphne::createPrintIRPass("IR after MLIR codegen (DaphneIR to MLIR):"));
+
     pm.addPass(mlir::createConvertMathToLLVMPass());
     pm.addPass(mlir::daphne::createModOpLoweringPass());
     pm.addPass(mlir::createCanonicalizerPass());
@@ -301,4 +306,6 @@ void DaphneIrExecutor::buildCodegenPipeline(mlir::PassManager &pm) {
 
     if (userConfig_.explain_mlir_codegen)
         pm.addPass(mlir::daphne::createPrintIRPass("IR after codegen pipeline"));
+    if (userConfig_.explain_mlir_codegen_mlir_specific)
+        pm.addPass(mlir::daphne::createPrintIRPass("IR after MLIR codegen (MLIR-specific):"));
 }
diff --git a/src/parser/config/ConfigParser.cpp b/src/parser/config/ConfigParser.cpp
index 2a7b6a28c..47d3def14 100644
--- a/src/parser/config/ConfigParser.cpp
+++ b/src/parser/config/ConfigParser.cpp
@@ -104,6 +104,15 @@ void ConfigParser::readUserConfig(const std::string &filename, DaphneUserConfig
         config.explain_obj_ref_mgnt = jf.at(DaphneConfigJsonParams::EXPLAIN_OBJ_REF_MGNT).get<bool>();
     if (keyExists(jf, DaphneConfigJsonParams::EXPLAIN_MLIR_CODEGEN))
         config.explain_mlir_codegen = jf.at(DaphneConfigJsonParams::EXPLAIN_MLIR_CODEGEN).get<bool>();
+    if (keyExists(jf, DaphneConfigJsonParams::EXPLAIN_MLIR_CODEGEN_SPARSITY_EXPLOITING_OP_FUSION))
+        config.explain_mlir_codegen_sparsity_exploiting_op_fusion =
+            jf.at(DaphneConfigJsonParams::EXPLAIN_MLIR_CODEGEN_SPARSITY_EXPLOITING_OP_FUSION).get<bool>();
+    if (keyExists(jf, DaphneConfigJsonParams::EXPLAIN_MLIR_CODEGEN_DAPHNEIR_TO_MLIR))
+        config.explain_mlir_codegen_daphneir_to_mlir =
+            jf.at(DaphneConfigJsonParams::EXPLAIN_MLIR_CODEGEN_DAPHNEIR_TO_MLIR).get<bool>();
+    if (keyExists(jf, DaphneConfigJsonParams::EXPLAIN_MLIR_CODEGEN_MLIR_SPECIFIC))
+        config.explain_mlir_codegen_mlir_specific =
+            jf.at(DaphneConfigJsonParams::EXPLAIN_MLIR_CODEGEN_MLIR_SPECIFIC).get<bool>();
     if (keyExists(jf, DaphneConfigJsonParams::TASK_PARTITIONING_SCHEME)) {
         config.taskPartitioningScheme = jf.at(DaphneConfigJsonParams::TASK_PARTITIONING_SCHEME).get();
diff --git a/src/parser/config/JsonParams.h b/src/parser/config/JsonParams.h
index d2c964cf9..3a6717497 100644
--- a/src/parser/config/JsonParams.h
+++ b/src/parser/config/JsonParams.h
@@ -54,6 +54,10 @@
     inline static const std::string EXPLAIN_VECTORIZED = "explain_vectorized";
     inline static const std::string EXPLAIN_OBJ_REF_MGNT = "explain_obj_ref_mgnt";
     inline static const std::string EXPLAIN_MLIR_CODEGEN = "explain_mlir_codegen";
+    inline static const std::string EXPLAIN_MLIR_CODEGEN_SPARSITY_EXPLOITING_OP_FUSION =
+        "explain_mlir_codegen_sparsity_exploiting_op_fusion";
+    inline static const std::string EXPLAIN_MLIR_CODEGEN_DAPHNEIR_TO_MLIR = "explain_mlir_codegen_daphneir_to_mlir";
+    inline static const std::string EXPLAIN_MLIR_CODEGEN_MLIR_SPECIFIC = "explain_mlir_codegen_mlir_specific";
     inline static const std::string TASK_PARTITIONING_SCHEME = "taskPartitioningScheme";
     inline static const std::string NUMBER_OF_THREADS = "numberOfThreads";
     inline static const std::string MINIMUM_TASK_SIZE = "minimumTaskSize";
@@ -95,6 +99,9 @@ struct DaphneConfigJsonParams {
         EXPLAIN_TYPE_ADAPTATION,
         EXPLAIN_VECTORIZED,
         EXPLAIN_MLIR_CODEGEN,
+        EXPLAIN_MLIR_CODEGEN_SPARSITY_EXPLOITING_OP_FUSION,
+        EXPLAIN_MLIR_CODEGEN_DAPHNEIR_TO_MLIR,
+        EXPLAIN_MLIR_CODEGEN_MLIR_SPECIFIC,
         EXPLAIN_OBJ_REF_MGNT,
         TASK_PARTITIONING_SCHEME,
         NUMBER_OF_THREADS,
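Usage sketch (illustrative, not part of the diff): given the option declaration above, the three new stages should be selectable like any other --explain value, separated by commas as described in the option help text, e.g.

    daphne --explain mlir_codegen_daphneir_to_mlir,mlir_codegen_mlir_specific example.daphne

where example.daphne is a placeholder script name. Alternatively, the corresponding UserConfig.json keys introduced by this change ("explain_mlir_codegen_sparsity_exploiting_op_fusion", "explain_mlir_codegen_daphneir_to_mlir", "explain_mlir_codegen_mlir_specific") can be set to true in the configuration file to get the same IR printouts without changing the command line.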