From 072a89382191aefafbef709e311b1c2d7a2ebaab Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Thu, 24 Oct 2024 10:10:06 +0100 Subject: [PATCH 01/13] HPCC-32867 Remove invalid comma separator from roxie COMPLETE line Signed-off-by: Gavin Halliday --- roxie/ccd/ccdlistener.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roxie/ccd/ccdlistener.cpp b/roxie/ccd/ccdlistener.cpp index 85fd1241a4e..19c4bc81b91 100644 --- a/roxie/ccd/ccdlistener.cpp +++ b/roxie/ccd/ccdlistener.cpp @@ -967,7 +967,7 @@ StringBuffer & ContextLogger::getStats(StringBuffer &s) const ids.append(slowestActivityIds[i]); formatStatistic(times, cycle_to_nanosec(slowestActivityTimes[i]), SMeasureTimeNs); } - s.appendf(", slowestActivities={ ids=[%s] times=[%s] }", ids.str(), times.str()); + s.appendf(" slowestActivities={ ids=[%s] times=[%s] }", ids.str(), times.str()); } return s; } From 7b3f72cab97e6f8df7f41c60dd3deac1e45e808f Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 24 Oct 2024 16:45:23 +0100 Subject: [PATCH 02/13] Split off 9.6.60 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- version.cmake | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index d8088466d00..8071839ef0b 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.6.59-closedown0 +version: 9.6.61-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.6.59-closedown0 +appVersion: 9.6.61-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index c6f048f8a40..2c0e5a9740c 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1477,7 +1477,7 @@ Pass in dict with .root, .visibility defined {{- end -}} {{- define "hpcc.generateHelmVersion" -}} -helmVersion: 9.6.59-closedown0 +helmVersion: 9.6.61-closedown0 {{- end -}} {{/* diff --git a/version.cmake b/version.cmake index e9a7d9bb1e5..7f45aebd4e1 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 6 ) -set ( HPCC_POINT 59 ) +set ( HPCC_POINT 61 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-10-18T15:32:01Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-10-24T15:45:22Z" ) ### From 415de2558bb5be51ba76d0a829d07d1babcc87c5 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 24 Oct 2024 16:46:31 +0100 Subject: [PATCH 03/13] Split off 9.4.108 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- version.cmake | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 3ec28c099ec..5ce36126d20 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.4.107-closedown0 +version: 9.4.109-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.4.107-closedown0 +appVersion: 9.4.109-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index cce63f1e4ee..2f6b3bf4504 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1473,7 +1473,7 @@ Pass in dict with .root, .visibility defined {{- end -}} {{- define "hpcc.generateHelmVersion" -}} -helmVersion: 9.4.107-closedown0 +helmVersion: 9.4.109-closedown0 {{- end -}} {{/* diff --git a/version.cmake b/version.cmake index 72e0161a682..eff94d9919c 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 4 ) -set ( HPCC_POINT 107 ) +set ( HPCC_POINT 109 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-10-18T15:32:52Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-10-24T15:46:30Z" ) ### From d7b689bc071844df09328a442ca22a533d7e1075 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 24 Oct 2024 16:47:38 +0100 Subject: [PATCH 04/13] Split off 9.2.134 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- version.cmake | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 4ce3ab22457..7954fd07890 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.2.133-closedown0 +version: 9.2.135-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.2.133-closedown0 +appVersion: 9.2.135-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index ea2cbb8672c..d966e72fd0c 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1361,7 +1361,7 @@ Pass in dict with .root, .visibility defined {{- end -}} {{- define "hpcc.generateHelmVersion" -}} -helmVersion: 9.2.133-closedown0 +helmVersion: 9.2.135-closedown0 {{- end -}} {{/* diff --git a/version.cmake b/version.cmake index 6e656f9b5a0..b779e1a391f 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 2 ) -set ( HPCC_POINT 133 ) +set ( HPCC_POINT 135 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-10-18T15:33:39Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-10-24T15:47:37Z" ) ### From d4c4918248345292a258cd66d37af99421fb1d38 Mon Sep 17 00:00:00 2001 From: Shamser Ahmed Date: Fri, 25 Oct 2024 11:36:47 +0100 Subject: [PATCH 05/13] HPCC-32879 Fix issue with FileAttrLock being used on superfiles For superfiles, use DXB_SuperFile for DfsXmlBranchKind argument in the CFileAttrLock::init() call. (If this is not done, superfile's attributes cannot be locked as dali attempts to lock a non-existent path). 
Signed-off-by: Shamser Ahmed --- dali/base/dadfs.cpp | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/dali/base/dadfs.cpp b/dali/base/dadfs.cpp index e22e5c7b487..7b008f187b4 100644 --- a/dali/base/dadfs.cpp +++ b/dali/base/dadfs.cpp @@ -2912,7 +2912,7 @@ class CDistributedFileBase : implements INTERFACE, public CInterface if (history) queryAttributes().removeTree(history); } - void lockFileAttrLock(CFileAttrLock & attrLock) + virtual void lockFileAttrLock(CFileAttrLock & attrLock) { if (!attrLock.init(logicalName, DXB_File, RTM_LOCK_WRITE, conn, defaultTimeout, "CDistributedFile::lockFileAttrLock")) { @@ -6482,6 +6482,19 @@ class CDistributedSuperFile: public CDistributedFileBase return new cSubFileIterator(subfiles,supersub); } + virtual void lockFileAttrLock(CFileAttrLock & attrLock) override + { + if (!attrLock.init(logicalName, DXB_SuperFile, RTM_LOCK_WRITE, conn, defaultTimeout, "CDistributedFile::lockFileAttrLock")) + { + // In unlikely event File/Attr doesn't exist, must ensure created, committed and root connection is reloaded. + verifyex(attrLock.init(logicalName, DXB_SuperFile, RTM_LOCK_WRITE|RTM_CREATE_QUERY, conn, defaultTimeout, "CDistributedFile::lockFileAttrLock")); + attrLock.commit(); + conn->commit(); + conn->reload(); + root.setown(conn->getRoot()); + } + } + void updateFileAttrs() { if (subfiles.ordinality()==0) { From 35a34fdd35edfd7dc0b83a523ba865fa7d378b00 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Thu, 24 Oct 2024 12:51:11 +0100 Subject: [PATCH 06/13] HPCC-32789 Fix potential race deadlock in Thor splitter If the splitter reading arms (COutput) were reading from the same page (CRowSet chunk) as the write ahead was writing to, then the write ahead could expand that row set and cause the reader to read unexpected row data (e.g. null rows). 
This caused the splitter arm to prematurely finish, leaving the splitter unbalanced and stalling as the writeahead blocked soon afterwards since the finished arm was too far behind. The bug was that the row set should never expand. It is pre-sized to avoid that. However, the condition of 'fullness' was incorrect, relying only on a dynamic calculation of total row size. The fix is to also check that the number of rows does not exceed the capacity. NB: The bug predates the new splitter code, however the new splitter implementation also changed the way the splitter arms interacted with writeahead. Previously the arm would call writeahead once it hit max explicitly, rather than blocking in the underlying ISharedSmartBuffer implementation. But it would still be possible I think to hit this bug (albeit less likely). Signed-off-by: Jake Smith --- thorlcr/thorutil/thbuf.cpp | 10 +++++++--- thorlcr/thorutil/thmem.hpp | 4 +++- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/thorlcr/thorutil/thbuf.cpp b/thorlcr/thorutil/thbuf.cpp index ad2d42bdb60..8edf9dfd942 100644 --- a/thorlcr/thorutil/thbuf.cpp +++ b/thorlcr/thorutil/thbuf.cpp @@ -1272,6 +1272,10 @@ class CRowSet : public CSimpleInterface, implements IInterface { return rows.get(r); } + inline bool isFull() const + { + return rows.isFull(); + } }; class Chunk : public CInterface @@ -1463,8 +1467,7 @@ class CSharedWriteAheadBase : public CSimpleInterface, implements ISharedSmartBu } } rowsRead++; - const void *retrow = rowSet->getRow(row++); - return retrow; + return rowSet->getRow(row++); } virtual void stop() { @@ -1693,7 +1696,8 @@ class CSharedWriteAheadBase : public CSimpleInterface, implements ISharedSmartBu unsigned len=rowSize(row); CriticalBlock b(crit); bool paged = false; - if (totalOutChunkSize >= minChunkSize) // chunks required to be at least minChunkSize + // NB: The isFull condition ensures that we never expand inMemRows, which would cause a race with readers reading same set + if 
(totalOutChunkSize >= minChunkSize || inMemRows->isFull()) // chunks required to be at least minChunkSize, or if hits max capacity { unsigned reader=anyReaderBehind(); if (NotFound != reader) diff --git a/thorlcr/thorutil/thmem.hpp b/thorlcr/thorutil/thmem.hpp index a89d4cdd0f2..f642481f49b 100644 --- a/thorlcr/thorutil/thmem.hpp +++ b/thorlcr/thorutil/thmem.hpp @@ -327,7 +327,8 @@ class graph_decl CThorExpandingRowArray : public CSimpleInterface if (!resize(numRows+1)) return false; } - rows[numRows++] = row; + rows[numRows] = row; + numRows++; return true; } bool binaryInsert(const void *row, ICompare &compare, bool dropLast=false); // NB: takes ownership on success @@ -356,6 +357,7 @@ class graph_decl CThorExpandingRowArray : public CSimpleInterface } inline rowidx_t ordinality() const { return numRows; } inline rowidx_t queryMaxRows() const { return maxRows; } + inline bool isFull() const { return numRows >= maxRows; } inline const void **getRowArray() { return rows; } void swap(CThorExpandingRowArray &src); From b5e1b18d364c9a46e49950f98325cddf3a5c854f Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 25 Oct 2024 12:00:29 +0100 Subject: [PATCH 07/13] HPCC-32877 Fix abort in esp when accessing secrets from a vault The vault secret code uses httplib to request entries from a vault. The default compilation mode is to use select() rather than poll(), but select() has a limit of 1024 sockets. That means that if a process has a large number of sockets (or files?) open the select based code will fail. 
This commit defines CPPHTTPLIB_USE_POLL which enables the use of poll() Signed-off-by: Gavin Halliday --- system/jlib/CMakeLists.txt | 3 +++ system/metrics/sinks/prometheus/CMakeLists.txt | 3 +++ 2 files changed, 6 insertions(+) diff --git a/system/jlib/CMakeLists.txt b/system/jlib/CMakeLists.txt index 9fa8175eabc..1a88f03d3a1 100644 --- a/system/jlib/CMakeLists.txt +++ b/system/jlib/CMakeLists.txt @@ -223,6 +223,9 @@ include_directories ( ${CMAKE_BINARY_DIR}/oss ) +# ensure httplib uses poll rather than select - otherwise it fails if too many sockets have been opened. +ADD_DEFINITIONS( -DCPPHTTPLIB_USE_POLL ) + ADD_DEFINITIONS( -D_USRDLL -DJLIB_EXPORTS ) HPCC_ADD_LIBRARY( jlib SHARED ${SRCS} ${INCLUDES} ) diff --git a/system/metrics/sinks/prometheus/CMakeLists.txt b/system/metrics/sinks/prometheus/CMakeLists.txt index c2454e1eadc..f2a89ad3f2d 100644 --- a/system/metrics/sinks/prometheus/CMakeLists.txt +++ b/system/metrics/sinks/prometheus/CMakeLists.txt @@ -30,6 +30,9 @@ include_directories( ${HPCC_SOURCE_DIR}/system/httplib ) +# ensure httplib uses poll rather than select - otherwise it fails if too many sockets have been opened. +ADD_DEFINITIONS( -DCPPHTTPLIB_USE_POLL ) + SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${STRICT_CXX_FLAGS}") ADD_DEFINITIONS( -DPROMETHEUSSINK_EXPORTS ) HPCC_ADD_LIBRARY( hpccmetrics_prometheussink SHARED ${srcs} ) From bb7b3b9f02194b87d213465ee9fcc41d5e61ba49 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Mon, 28 Oct 2024 16:58:05 +0000 Subject: [PATCH 08/13] HPCC-32896 Fix Roxie clone file TLK meta loss bug Roxie's clone file code was not copying the kind attribute from the source file. This meant the meta info for the TLK part was lost. 
Signed-off-by: Jake Smith --- dali/dfu/dfuutil.cpp | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/dali/dfu/dfuutil.cpp b/dali/dfu/dfuutil.cpp index 6f7bb178190..6cb0cd1ceee 100644 --- a/dali/dfu/dfuutil.cpp +++ b/dali/dfu/dfuutil.cpp @@ -530,13 +530,18 @@ class CFileCloner if (iskey&&!cluster2.isEmpty()) dstfdesc->addCluster(cluster2,grp2,spec2); - for (unsigned pn=0; pnqueryPart(pn)->queryProperties().getPropInt64("@size",-1); + for (unsigned pn=0; pnqueryPart(pn)->queryProperties(); + IPropertyTree &dstProps = dstfdesc->queryPart(pn)->queryProperties(); + offset_t sz = srcProps.getPropInt64("@size",-1); if (sz!=(offset_t)-1) - dstfdesc->queryPart(pn)->queryProperties().setPropInt64("@size",sz); + dstProps.setPropInt64("@size",sz); StringBuffer dates; - if (srcfdesc->queryPart(pn)->queryProperties().getProp("@modified",dates)) - dstfdesc->queryPart(pn)->queryProperties().setProp("@modified",dates.str()); + if (srcProps.getProp("@modified",dates)) + dstProps.setProp("@modified",dates.str()); + if (srcProps.hasProp("@kind")) + dstProps.setProp("@kind", srcProps.queryProp("@kind")); } if (!copyphysical) //cloneFrom tells roxie where to copy from.. it's unnecessary if we already did the copy From c415ffd937260747d5662d2542006847b136724b Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Tue, 22 Oct 2024 10:43:07 +0100 Subject: [PATCH 09/13] HPCC-32845 Guard against KJ reading TLKs as regular index parts Check that the last part in an index/subindex is a TLK if the meta data is missing. Also remove some redundant 'delayed' functionality. 
Signed-off-by: Jake Smith --- .../activities/hashdistrib/thhashdistrib.cpp | 3 +- .../activities/indexwrite/thindexwrite.cpp | 4 +-- thorlcr/activities/keydiff/thkeydiff.cpp | 5 +-- thorlcr/activities/keyedjoin/thkeyedjoin.cpp | 31 ++++++++--------- .../activities/keyedjoin/thkeyedjoinslave.cpp | 27 +++++---------- thorlcr/activities/keypatch/thkeypatch.cpp | 6 +--- thorlcr/thorutil/thormisc.cpp | 34 +++++++++++++++++++ thorlcr/thorutil/thormisc.hpp | 2 ++ 8 files changed, 65 insertions(+), 47 deletions(-) diff --git a/thorlcr/activities/hashdistrib/thhashdistrib.cpp b/thorlcr/activities/hashdistrib/thhashdistrib.cpp index b2c8a42e50f..36c2cba1194 100644 --- a/thorlcr/activities/hashdistrib/thhashdistrib.cpp +++ b/thorlcr/activities/hashdistrib/thhashdistrib.cpp @@ -26,6 +26,7 @@ #include "thorport.hpp" #include "thbufdef.hpp" #include "thexception.hpp" +#include "thormisc.hpp" #define NUMINPARALLEL 16 @@ -115,7 +116,7 @@ class IndexDistributeActivityMaster : public HashDistributeMasterBase checkFormatCrc(this, file, helper->getFormatCrc(), nullptr, helper->getFormatCrc(), nullptr, true); Owned fileDesc = file->getFileDescriptor(); Owned tlkDesc = fileDesc->getPart(fileDesc->numParts()-1); - if (!tlkDesc->queryProperties().hasProp("@kind") || 0 != stricmp("topLevelKey", tlkDesc->queryProperties().queryProp("@kind"))) + if (!hasTLK(*file, this)) throw MakeActivityException(this, 0, "Cannot distribute using a non-distributed key: '%s'", scoped.str()); unsigned location; OwnedIFile iFile; diff --git a/thorlcr/activities/indexwrite/thindexwrite.cpp b/thorlcr/activities/indexwrite/thindexwrite.cpp index 1a9f5894a87..b9c2963ff8a 100644 --- a/thorlcr/activities/indexwrite/thindexwrite.cpp +++ b/thorlcr/activities/indexwrite/thindexwrite.cpp @@ -159,9 +159,9 @@ class IndexWriteActivityMaster : public CMasterActivity checkFormatCrc(this, _f, helper->getFormatCrc(), nullptr, helper->getFormatCrc(), nullptr, true); IDistributedFile *f = _f->querySuperFile(); if (!f) f = _f; - 
Owned existingTlk = f->getPart(f->numParts()-1); - if (!existingTlk->queryAttributes().hasProp("@kind") || 0 != stricmp("topLevelKey", existingTlk->queryAttributes().queryProp("@kind"))) + if (!hasTLK(*f, this)) throw MakeActivityException(this, 0, "Cannot build new key '%s' based on non-distributed key '%s'", fileName.get(), diName.get()); + Owned existingTlk = f->getPart(f->numParts()-1); IPartDescriptor *tlkDesc = fileDesc->queryPart(fileDesc->numParts()-1); IPropertyTree &props = tlkDesc->queryProperties(); if (existingTlk->queryAttributes().hasProp("@size")) diff --git a/thorlcr/activities/keydiff/thkeydiff.cpp b/thorlcr/activities/keydiff/thkeydiff.cpp index 8f688ee8abd..394e6b787f5 100644 --- a/thorlcr/activities/keydiff/thkeydiff.cpp +++ b/thorlcr/activities/keydiff/thkeydiff.cpp @@ -71,10 +71,7 @@ class CKeyDiffMaster : public CMasterActivity originalDesc.setown(originalIndexFile->getFileDescriptor()); newIndexDesc.setown(newIndexFile->getFileDescriptor()); - Owned tlkDesc = originalDesc->getPart(originalDesc->numParts()-1); - const char *kind = tlkDesc->queryProperties().queryProp("@kind"); - local = NULL == kind || 0 != stricmp("topLevelKey", kind); - + local = !hasTLK(*originalIndexFile, this); if (!local) width--; // 1 part == No n distributed / Monolithic key if (width > container.queryJob().querySlaves()) diff --git a/thorlcr/activities/keyedjoin/thkeyedjoin.cpp b/thorlcr/activities/keyedjoin/thkeyedjoin.cpp index 38fabfca861..af5d197721f 100644 --- a/thorlcr/activities/keyedjoin/thkeyedjoin.cpp +++ b/thorlcr/activities/keyedjoin/thkeyedjoin.cpp @@ -112,20 +112,21 @@ class CKeyedJoinMaster : public CMasterActivity unsigned numParts = fileDesc->numParts(); unsigned nextGroupStartPos = 0; + IDistributedFile *subFile = file; for (unsigned p=0; pqueryPart(p); - const char *kind = isIndexWithTlk ? 
part->queryProperties().queryProp("@kind") : nullptr; - if (!kind || !strsame("topLevelKey", kind)) + unsigned partIdx = part->queryPartIndex(); + unsigned subFileNum = NotFound; + unsigned subPartIdx = partIdx; + if (superFileDesc) + { + superFileDesc->mapSubPart(partIdx, subFileNum, subPartIdx); + partIdx = superWidth*subFileNum+subPartIdx; + subFile = &super->querySubFile(subFileNum, true); + } + if (!isIndexWithTlk || (1 == numParts) || (subPartIdx < (subFile->numParts()-1)) || !hasTLK(*subFile, nullptr)) { - unsigned partIdx = part->queryPartIndex(); - unsigned subfile = NotFound; - unsigned subPartIdx = partIdx; - if (superFileDesc) - { - superFileDesc->mapSubPart(partIdx, subfile, subPartIdx); - partIdx = superWidth*subfile+subPartIdx; - } if (activity.local) { if (activity.queryContainer().queryLocalData()) @@ -234,7 +235,7 @@ class CKeyedJoinMaster : public CMasterActivity slaveParts.push_back(p); } if (superFileDesc) - partIdx = superWidth*subfile+subPartIdx; + partIdx = superWidth*subFileNum+subPartIdx; partsByPartIdx.push_back(partIdx); assertex(partIdx < totalParts); partToSlave[partIdx] = mappedPos; @@ -387,10 +388,7 @@ class CKeyedJoinMaster : public CMasterActivity ForEach(*iter) { IDistributedFile &f = iter->query(); - unsigned np = f.numParts()-1; - IDistributedFilePart &part = f.queryPart(np); - const char *kind = part.queryAttributes().queryProp("@kind"); - bool hasTlk = NULL != kind && 0 == stricmp("topLevelKey", kind); // if last part not tlk, then deemed local (might be singlePartKey) + bool hasTlk = hasTLK(f, this); if (first) { first = false; @@ -419,8 +417,7 @@ class CKeyedJoinMaster : public CMasterActivity totalIndexParts = indexFile->numParts(); if (totalIndexParts) { - const char *kind = indexFile->queryPart(indexFile->numParts()-1).queryAttributes().queryProp("@kind"); - keyHasTlk = NULL != kind && 0 == stricmp("topLevelKey", kind); + keyHasTlk = hasTLK(*indexFile, this); if (keyHasTlk) --totalIndexParts; } diff --git 
a/thorlcr/activities/keyedjoin/thkeyedjoinslave.cpp b/thorlcr/activities/keyedjoin/thkeyedjoinslave.cpp index 8844a99b7f3..e3710511abe 100644 --- a/thorlcr/activities/keyedjoin/thkeyedjoinslave.cpp +++ b/thorlcr/activities/keyedjoin/thkeyedjoinslave.cpp @@ -1445,7 +1445,7 @@ class CKeyedJoinSlave : public CSlaveActivity, implements IJoinProcessor, implem { unsigned partNo = partCopy & partMask; unsigned copy = partCopy >> partBits; - Owned keyIndex = activity.createPartKeyIndex(partNo, copy, false); + Owned keyIndex = activity.createPartKeyIndex(partNo, copy); partKeySet->addIndex(keyIndex.getClear()); } keyManager = createKeyMerger(helper->queryIndexRecordSize()->queryRecordAccessor(true), partKeySet, 0, &contextLogger, helper->hasNewSegmentMonitors(), false); @@ -2454,7 +2454,7 @@ class CKeyedJoinSlave : public CSlaveActivity, implements IJoinProcessor, implem } return tlkKeyIndexes.ordinality(); } - IKeyIndex *createPartKeyIndex(unsigned partNo, unsigned copy, bool delayed) + IKeyIndex *createPartKeyIndex(unsigned partNo, unsigned copy) { IPartDescriptor &filePart = allIndexParts.item(partNo); unsigned crc=0; @@ -2464,25 +2464,16 @@ class CKeyedJoinSlave : public CSlaveActivity, implements IJoinProcessor, implem StringBuffer filename; rfn.getPath(filename); - if (delayed) - { - Owned lazyIFileIO = queryThor().queryFileCache().lookupIFileIO(*this, indexName, filePart, nullptr); - Owned delayedFile = createDelayedFile(lazyIFileIO); - return createKeyIndex(filename, crc, *delayedFile, (unsigned) -1, false, 0); - } - else - { - /* NB: createKeyIndex here, will load the key immediately - * But that's okay, because we are only here on demand. - * The underlying IFileIO can later be closed by fhe file caching mechanism. 
- */ - Owned lazyIFileIO = queryThor().queryFileCache().lookupIFileIO(*this, indexName, filePart, nullptr); - return createKeyIndex(filename, crc, *lazyIFileIO, (unsigned) -1, false, 0); - } + /* NB: createKeyIndex here, will load the key immediately + * But that's okay, because we are only here on demand. + * The underlying IFileIO can later be closed by fhe file caching mechanism. + */ + Owned lazyIFileIO = queryThor().queryFileCache().lookupIFileIO(*this, indexName, filePart, nullptr); + return createKeyIndex(filename, crc, *lazyIFileIO, (unsigned) -1, false, 0); } IKeyManager *createPartKeyManager(unsigned partNo, unsigned copy, IContextLogger *ctx) { - Owned keyIndex = createPartKeyIndex(partNo, copy, false); + Owned keyIndex = createPartKeyIndex(partNo, copy); return createLocalKeyManager(helper->queryIndexRecordSize()->queryRecordAccessor(true), keyIndex, ctx, helper->hasNewSegmentMonitors(), false); } const void *preparePendingLookupRow(void *row, size32_t maxSz, const void *lhsRow, size32_t keySz) diff --git a/thorlcr/activities/keypatch/thkeypatch.cpp b/thorlcr/activities/keypatch/thkeypatch.cpp index 3f279cbe82c..50cb4ec0354 100644 --- a/thorlcr/activities/keypatch/thkeypatch.cpp +++ b/thorlcr/activities/keypatch/thkeypatch.cpp @@ -71,11 +71,7 @@ class CKeyPatchMaster : public CMasterActivity originalDesc.setown(originalIndexFile->getFileDescriptor()); patchDesc.setown(patchFile->getFileDescriptor()); - - Owned tlkDesc = originalDesc->getPart(originalDesc->numParts()-1); - const char *kind = tlkDesc->queryProperties().queryProp("@kind"); - local = NULL == kind || 0 != stricmp("topLevelKey", kind); - + local = !hasTLK(*originalIndexFile, this); if (!local && width > 1) width--; // 1 part == No n distributed / Monolithic key if (width > container.queryJob().querySlaves()) diff --git a/thorlcr/thorutil/thormisc.cpp b/thorlcr/thorutil/thormisc.cpp index 395b0905131..12926c92956 100644 --- a/thorlcr/thorutil/thormisc.cpp +++ b/thorlcr/thorutil/thormisc.cpp @@ 
-29,6 +29,8 @@ #include "jsocket.hpp" #include "jmutex.hpp" +#include "jhtree.hpp" + #include "commonext.hpp" #include "dadfs.hpp" #include "dasds.hpp" @@ -1703,3 +1705,35 @@ void saveWuidToFile(const char *wuid) wuidFileIO->write(0, strlen(wuid), wuid); wuidFileIO->close(); } + +bool hasTLK(IDistributedFile &file, CActivityBase *activity) +{ + unsigned np = file.numParts(); + if (np<=1) // NB: a better test would be to only continue if this is a width that is +1 of group it's based on, but not worth checking + return false; + IDistributedFilePart &part = file.queryPart(np-1); + bool keyHasTlk = strisame("topLevelKey", part.queryAttributes().queryProp("@kind")); + if (!keyHasTlk) + { + // See HPCC-32845 - check if TLK flag is missing from TLK part + // It is very likely the last part should be a TLK. Even a local key (>1 parts) has a TLK by default (see buildLocalTlks) + RemoteFilename rfn; + part.getFilename(rfn); + StringBuffer filename; + rfn.getPath(filename); + Owned index = createKeyIndex(filename, 0, false, 0); + dbgassertex(index); + if (index->isTopLevelKey()) + { + if (activity) + { + Owned e = MakeActivityException(activity, 0, "TLK file part of file %s is missing kind=\"topLevelKey\" flag. 
The meta data should be fixed!", file.queryLogicalName()); + reportExceptionToWorkunitCheckIgnore(activity->queryJob().queryWorkUnit(), e, SeverityWarning); + StringBuffer errMsg; + UWARNLOG("%s", e->errorMessage(errMsg).str()); + } + keyHasTlk = true; + } + } + return keyHasTlk; +} diff --git a/thorlcr/thorutil/thormisc.hpp b/thorlcr/thorutil/thormisc.hpp index 59d1797d1e8..ed04efc98a8 100644 --- a/thorlcr/thorutil/thormisc.hpp +++ b/thorlcr/thorutil/thormisc.hpp @@ -723,4 +723,6 @@ class graph_decl CThorPerfTracer : protected PerfTracer extern graph_decl void saveWuidToFile(const char *wuid); +extern graph_decl bool hasTLK(IDistributedFile &file, CActivityBase *activity); + #endif From 4735ff97c672f9ce60274b2a6bc532f77e43b074 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Wed, 30 Oct 2024 10:23:51 +0000 Subject: [PATCH 10/13] HPCC-32907 Improve roxie startup time by removing unused option * The helper name has been computed for many years, but roxie still contained code to extract it from the graph if it was set. 
Signed-off-by: Gavin Halliday --- roxie/ccd/ccdquery.cpp | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/roxie/ccd/ccdquery.cpp b/roxie/ccd/ccdquery.cpp index 29c34c9fa2d..c983ebd0067 100644 --- a/roxie/ccd/ccdquery.cpp +++ b/roxie/ccd/ccdquery.cpp @@ -625,9 +625,7 @@ class CQueryFactory : implements IQueryFactory, implements IResourceContext, pub break; } StringBuffer helperName; - node.getProp("att[@name=\"helper\"]/@value", helperName); - if (!helperName.length()) - helperName.append("fAc").append(id); + helperName.append("fAc").append(id); HelperFactory *helperFactory = dll->getFactory(helperName); if (!helperFactory) throw MakeStringException(ROXIE_INTERNAL_ERROR, "Internal error: helper function %s not exported", helperName.str()); @@ -2002,9 +2000,7 @@ class CAgentQueryFactory : public CQueryFactory else { StringBuffer helperName; - node.getProp("att[@name=\"helper\"]/@value", helperName); - if (!helperName.length()) - helperName.append("fAc").append(node.getPropInt("@id", 0)); + helperName.append("fAc").append(node.getPropInt("@id", 0)); HelperFactory *helperFactory = dll->getFactory(helperName.str()); if (!helperFactory) throw MakeStringException(ROXIE_INTERNAL_ERROR, "Internal error: helper function %s not exported", helperName.str()); From 607ea8925d4235ef9c72bd17b06eeef10e8b43db Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Wed, 30 Oct 2024 14:45:08 +0000 Subject: [PATCH 11/13] HPCC-32910 Add time waiting for git lock to the workunit stats Signed-off-by: Gavin Halliday --- ecl/eclcc/eclcc.cpp | 3 +++ ecl/hql/hqlrepository.cpp | 11 +++++++++-- ecl/hql/hqlrepository.hpp | 1 + 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/ecl/eclcc/eclcc.cpp b/ecl/eclcc/eclcc.cpp index 2aba8307f8c..9b1a959b28d 100644 --- a/ecl/eclcc/eclcc.cpp +++ b/ecl/eclcc/eclcc.cpp @@ -1535,8 +1535,11 @@ void EclCC::processSingleQuery(const EclRepositoryManager & localRepositoryManag updateWorkunitStat(instance.wu, SSToperation, 
">compile:>parse", StTimeElapsed, NULL, parseTimeNs); stat_type sourceDownloadTime = localRepositoryManager.getStatistic(StTimeElapsed); + stat_type sourceDownloadBlockedTime = localRepositoryManager.getStatistic(StTimeBlocked); if (sourceDownloadTime) updateWorkunitStat(instance.wu, SSToperation, ">compile:>parse:>download", StTimeElapsed, NULL, sourceDownloadTime); + if (sourceDownloadBlockedTime) + updateWorkunitStat(instance.wu, SSToperation, ">compile:>parse:>download", StTimeBlocked, NULL, sourceDownloadBlockedTime); if (optExtraStats) { diff --git a/ecl/hql/hqlrepository.cpp b/ecl/hql/hqlrepository.cpp index 06692728bb1..1c0d2507a21 100644 --- a/ecl/hql/hqlrepository.cpp +++ b/ecl/hql/hqlrepository.cpp @@ -676,6 +676,8 @@ unsigned __int64 EclRepositoryManager::getStatistic(StatisticKind kind) const { case StTimeElapsed: return cycle_to_nanosec(gitDownloadCycles); + case StTimeBlocked: + return cycle_to_nanosec(gitDownloadBlockedCycles); } return 0; } @@ -823,7 +825,12 @@ IEclSourceCollection * EclRepositoryManager::resolveGitCollection(const char * r throw makeStringExceptionV(99, "Unsupported repository link format '%s'", defaultUrl); bool alreadyExists = false; + + CCycleTimer gitDownloadTimer; Owned gitUpdateLock(getGitUpdateLock(repoPath)); + cycle_t blockedCycles = gitDownloadTimer.elapsedCycles(); + gitDownloadBlockedCycles += blockedCycles; + if (checkDirExists(repoPath)) { if (options.cleanRepos) @@ -853,7 +860,6 @@ IEclSourceCollection * EclRepositoryManager::resolveGitCollection(const char * r bool ok = false; Owned error; - CCycleTimer gitDownloadTimer; if (alreadyExists) { if (options.updateRepos) @@ -890,7 +896,8 @@ IEclSourceCollection * EclRepositoryManager::resolveGitCollection(const char * r //this could become a read/write lock if that proved to be an issue. 
gitUpdateLock.clear(); - gitDownloadCycles += gitDownloadTimer.elapsedCycles(); + gitDownloadCycles += (gitDownloadTimer.elapsedCycles() - blockedCycles); + if (error) { if (errorReceiver) diff --git a/ecl/hql/hqlrepository.hpp b/ecl/hql/hqlrepository.hpp index 071139fe2da..3d4e5e1317d 100644 --- a/ecl/hql/hqlrepository.hpp +++ b/ecl/hql/hqlrepository.hpp @@ -105,6 +105,7 @@ class HQL_API EclRepositoryManager IArrayOf overrideSources; // -D options IArrayOf allSources; // also includes -D options cycle_t gitDownloadCycles = 0; + cycle_t gitDownloadBlockedCycles = 0; //Include all options in a nested struct to make it easy to ensure they are cloned struct { From fcc5efc1325092943c6b23b48311e9610af09814 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Thu, 31 Oct 2024 11:49:42 +0000 Subject: [PATCH 12/13] HPCC-32875 Ensure rowservice can process fields stored as blobs Signed-off-by: Gavin Halliday --- fs/dafsserver/dafsserver.cpp | 14 ++++++++++++++ rtl/eclrtl/rtldynfield.cpp | 1 + rtl/eclrtl/rtlrecord.cpp | 8 ++++++++ system/jhtree/jhtree.cpp | 25 +++++++++++++++++-------- 4 files changed, 40 insertions(+), 8 deletions(-) diff --git a/fs/dafsserver/dafsserver.cpp b/fs/dafsserver/dafsserver.cpp index 5e94f8b8c8f..2e1e4aaa5b9 100644 --- a/fs/dafsserver/dafsserver.cpp +++ b/fs/dafsserver/dafsserver.cpp @@ -2278,6 +2278,7 @@ class CRemoteIndexReadActivity : public CRemoteIndexBaseActivity Owned translator; unsigned __int64 chooseN = 0; + bool cleanupBlobs = false; public: CRemoteIndexReadActivity(IPropertyTree &config, IFileDescriptor *fileDesc) : PARENT(config, fileDesc) { @@ -2316,6 +2317,12 @@ class CRemoteIndexReadActivity : public CRemoteIndexBaseActivity } dbgassertex(retSz); + if (cleanupBlobs) + { + keyManager->releaseBlobs(); + cleanupBlobs = false; + } + const void *ret = outBuilder.getSelf(); outBuilder.finishRow(retSz); ++processed; @@ -2350,6 +2357,13 @@ class CRemoteIndexReadActivity : public CRemoteIndexBaseActivity { return 
out.appendf("indexread[%s]", fileName.get()); } + + virtual const byte * lookupBlob(unsigned __int64 id) override + { + size32_t dummy; + cleanupBlobs = true; + return (byte *) keyManager->loadBlob(id, dummy, nullptr); + } }; diff --git a/rtl/eclrtl/rtldynfield.cpp b/rtl/eclrtl/rtldynfield.cpp index 31d8ad9f02c..1687ccf1072 100644 --- a/rtl/eclrtl/rtldynfield.cpp +++ b/rtl/eclrtl/rtldynfield.cpp @@ -1690,6 +1690,7 @@ class GeneralRecordTranslator : public CInterfaceOf { const RtlRecord *subDest = destRecInfo.queryNested(idx); const RtlRecord *subSrc = sourceRecInfo.queryNested(info.matchIdx); + assertex(subSrc); info.subTrans = new GeneralRecordTranslator(*subDest, *subSrc, binarySource); if (!info.subTrans->needsTranslate()) { diff --git a/rtl/eclrtl/rtlrecord.cpp b/rtl/eclrtl/rtlrecord.cpp index 4c27b8cd05c..fb06d6b74e4 100644 --- a/rtl/eclrtl/rtlrecord.cpp +++ b/rtl/eclrtl/rtlrecord.cpp @@ -292,6 +292,12 @@ RtlRecord::RtlRecord(const RtlFieldInfo * const *_fields, bool expandFields) : f const RtlTypeInfo *curType = queryType(i); if (!curType->isFixedSize() || (fields[i]->flags & RFTMinifblock)) numVarFields++; + if (curType->isBlob()) + { + curType = curType->queryChildType(); + if (unlikely(!curType)) + throwUnexpectedX("Blob type has no child type"); + } if (curType->getType()==type_table || curType->getType()==type_record || curType->getType()==type_dictionary) numTables++; } @@ -331,6 +337,8 @@ RtlRecord::RtlRecord(const RtlFieldInfo * const *_fields, bool expandFields) : f curVariable++; fixedOffset = 0; } + if (curType->isBlob()) + curType = curType->queryChildType(); switch (curType->getType()) { case type_table: diff --git a/system/jhtree/jhtree.cpp b/system/jhtree/jhtree.cpp index 2ba4ba5e961..8148c37477c 100644 --- a/system/jhtree/jhtree.cpp +++ b/system/jhtree/jhtree.cpp @@ -3636,7 +3636,7 @@ class IKeyManagerTest : public CppUnit::TestFixture { const char *json = variable ? 
"{ \"ty1\": { \"fieldType\": 4, \"length\": 10 }, " - " \"ty2\": { \"fieldType\": 15, \"length\": 8 }, " + " \"ty2\": { \"fieldType\": 15, \"length\": 8, \"child\": \"ty1\" }, " " \"fieldType\": 13, \"length\": 10, " " \"fields\": [ " " { \"name\": \"f1\", \"type\": \"ty1\", \"flags\": 4 }, " @@ -3816,13 +3816,22 @@ class IKeyManagerTest : public CppUnit::TestFixture void testKeys() { - ASSERT(sizeof(CKeyIdAndPos) == sizeof(unsigned __int64) + sizeof(offset_t)); - for (bool var : { true, false }) - for (bool trail : { false, true }) - for (bool noseek : { false, true }) - for (bool quick : { true, false }) - for (const char * compression : { (const char *)nullptr, "POC", "inplace" }) - testKeys(var, trail, noseek, quick, compression); + try + { + ASSERT(sizeof(CKeyIdAndPos) == sizeof(unsigned __int64) + sizeof(offset_t)); + for (bool var : { true, false }) + for (bool trail : { false, true }) + for (bool noseek : { false, true }) + for (bool quick : { true, false }) + for (const char * compression : { (const char *)nullptr, "POC", "inplace" }) + testKeys(var, trail, noseek, quick, compression); + } + catch (IException * e) + { + StringBuffer s; + e->errorMessage(s); + CPPUNIT_ASSERT_MESSAGE(s.str(), false); + } } }; From b1e91b006413abe5c5d04abc107c43be49591dc0 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 1 Nov 2024 09:52:16 +0000 Subject: [PATCH 13/13] HPCC-32915 Query Metrics causing multiple calls to WUDetails Signed-off-by: Gordon Smith --- esp/src/src-react/components/Metrics.tsx | 3 + esp/src/src-react/hooks/metrics.ts | 177 ++++++++++++----------- esp/src/src-react/hooks/workunit.ts | 2 +- 3 files changed, 99 insertions(+), 83 deletions(-) diff --git a/esp/src/src-react/components/Metrics.tsx b/esp/src/src-react/components/Metrics.tsx index 3d1f91cc2bd..65996c00a9f 100644 --- a/esp/src/src-react/components/Metrics.tsx +++ b/esp/src/src-react/components/Metrics.tsx @@ -56,6 +56,9 @@ export const Metrics: React.FunctionComponent = ({ selection, 
fullscreen = false }) => { + if (querySet && queryId) { + wuid = ""; + } const [_uiState, _setUIState] = React.useState({ ...defaultUIState }); const [selectedMetricsSource, setSelectedMetricsSource] = React.useState(""); const [selectedMetrics, setSelectedMetrics] = React.useState([]); diff --git a/esp/src/src-react/hooks/metrics.ts b/esp/src/src-react/hooks/metrics.ts index 2b0a47687f2..6afbcff8f32 100644 --- a/esp/src/src-react/hooks/metrics.ts +++ b/esp/src/src-react/hooks/metrics.ts @@ -5,6 +5,7 @@ import { scopedLogger } from "@hpcc-js/util"; import { singletonHook } from "react-singleton-hook"; import { userKeyValStore } from "src/KeyValStore"; import { DockPanelLayout } from "../layouts/DockPanel"; +import { singletonDebounce } from "../util/throttle"; import { useWorkunit } from "./workunit"; import { useQuery } from "./query"; import { useCounter } from "./util"; @@ -214,6 +215,8 @@ function useMetricsViewsImpl(): useMetricsViewsResult { export const useMetricsViews = singletonHook(defaultState, useMetricsViewsImpl); +let wuDetailsMetaResponse: Promise; + export function useMetricMeta(): [string[], string[]] { const service = useConst(() => new WorkunitsService({ baseUrl: "" })); @@ -221,7 +224,10 @@ export function useMetricMeta(): [string[], string[]] { const [properties, setProperties] = React.useState([]); React.useEffect(() => { - service?.WUDetailsMeta({}).then(response => { + if (!wuDetailsMetaResponse && service) { + wuDetailsMetaResponse = service.WUDetailsMeta({}); + } + wuDetailsMetaResponse?.then(response => { setScopeTypes(response?.ScopeTypes?.ScopeType || []); setProperties((response?.Properties?.Property.map(p => p.Name) || []).sort()); }); @@ -274,45 +280,48 @@ export function useWorkunitMetrics( const [count, increment] = useCounter(); React.useEffect(() => { - setStatus(FetchStatus.STARTED); - workunit?.fetchDetailsNormalized({ - ScopeFilter: scopeFilter, - NestedFilter: nestedFilter, - PropertiesToReturn: { - AllScopes: true, - 
AllAttributes: true, - AllProperties: true, - AllNotes: true, - AllStatistics: true, - AllHints: true - }, - ScopeOptions: { - IncludeId: true, - IncludeScope: true, - IncludeScopeType: true, - IncludeMatchedScopesInResults: true - }, - PropertyOptions: { - IncludeName: true, - IncludeRawValue: true, - IncludeFormatted: true, - IncludeMeasure: true, - IncludeCreator: false, - IncludeCreatorType: false - } - }).then(response => { - setData(response?.data); - setColumns(response?.columns); - setActivities(response?.meta?.Activities?.Activity || []); - setProperties(response?.meta?.Properties?.Property || []); - setMeasures(response?.meta?.Measures?.Measure || []); - setScopeTypes(response?.meta?.ScopeTypes?.ScopeType || []); - }).catch(e => { - logger.error(e); - }).finally(() => { - setStatus(FetchStatus.COMPLETE); - }); - }, [workunit, state, count, scopeFilter, nestedFilter]); + if (wuid && workunit) { + const fetchDetailsNormalized = singletonDebounce(workunit, "fetchDetailsNormalized"); + setStatus(FetchStatus.STARTED); + fetchDetailsNormalized({ + ScopeFilter: scopeFilter, + NestedFilter: nestedFilter, + PropertiesToReturn: { + AllScopes: true, + AllAttributes: true, + AllProperties: true, + AllNotes: true, + AllStatistics: true, + AllHints: true + }, + ScopeOptions: { + IncludeId: true, + IncludeScope: true, + IncludeScopeType: true, + IncludeMatchedScopesInResults: true + }, + PropertyOptions: { + IncludeName: true, + IncludeRawValue: true, + IncludeFormatted: true, + IncludeMeasure: true, + IncludeCreator: false, + IncludeCreatorType: false + } + }).then(response => { + setData(response?.data); + setColumns(response?.columns); + setActivities(response?.meta?.Activities?.Activity || []); + setProperties(response?.meta?.Properties?.Property || []); + setMeasures(response?.meta?.Measures?.Measure || []); + setScopeTypes(response?.meta?.ScopeTypes?.ScopeType || []); + }).catch(e => { + logger.error(e); + }).finally(() => { + setStatus(FetchStatus.COMPLETE); + 
}); + } + }, [workunit, state, count, scopeFilter, nestedFilter, wuid]); return { metrics: data, columns, activities, properties, measures, scopeTypes, status, refresh: increment }; } @@ -335,45 +344,48 @@ export function useQueryMetrics( const [count, increment] = useCounter(); React.useEffect(() => { - setStatus(FetchStatus.STARTED); - query?.fetchDetailsNormalized({ - ScopeFilter: scopeFilter, - NestedFilter: nestedFilter, - PropertiesToReturn: { - AllScopes: true, - AllAttributes: true, - AllProperties: true, - AllNotes: true, - AllStatistics: true, - AllHints: true - }, - ScopeOptions: { - IncludeId: true, - IncludeScope: true, - IncludeScopeType: true, - IncludeMatchedScopesInResults: true - }, - PropertyOptions: { - IncludeName: true, - IncludeRawValue: true, - IncludeFormatted: true, - IncludeMeasure: true, - IncludeCreator: false, - IncludeCreatorType: false - } - }).then(response => { - setData(response?.data); - setColumns(response?.columns); - setActivities(response?.meta?.Activities?.Activity || []); - setProperties(response?.meta?.Properties?.Property || []); - setMeasures(response?.meta?.Measures?.Measure || []); - setScopeTypes(response?.meta?.ScopeTypes?.ScopeType || []); - }).catch(e => { - logger.error(e); - }).finally(() => { - setStatus(FetchStatus.COMPLETE); - }); - }, [query, state, count, scopeFilter, nestedFilter]); + if (querySet && queryId && query) { + const fetchDetailsNormalized = singletonDebounce(query, "fetchDetailsNormalized"); + setStatus(FetchStatus.STARTED); + fetchDetailsNormalized({ + ScopeFilter: scopeFilter, + NestedFilter: nestedFilter, + PropertiesToReturn: { + AllScopes: true, + AllAttributes: true, + AllProperties: true, + AllNotes: true, + AllStatistics: true, + AllHints: true + }, + ScopeOptions: { + IncludeId: true, + IncludeScope: true, + IncludeScopeType: true, + IncludeMatchedScopesInResults: true + }, + PropertyOptions: { + IncludeName: true, + IncludeRawValue: true, + IncludeFormatted: true, + IncludeMeasure: 
true, + IncludeCreator: false, + IncludeCreatorType: false + } + }).then(response => { + setData(response?.data); + setColumns(response?.columns); + setActivities(response?.meta?.Activities?.Activity || []); + setProperties(response?.meta?.Properties?.Property || []); + setMeasures(response?.meta?.Measures?.Measure || []); + setScopeTypes(response?.meta?.ScopeTypes?.ScopeType || []); + }).catch(e => { + logger.error(e); + }).finally(() => { + setStatus(FetchStatus.COMPLETE); + }); + } + }, [query, state, count, scopeFilter, nestedFilter, querySet, queryId]); return { metrics: data, columns, activities, properties, measures, scopeTypes, status, refresh: increment }; } @@ -385,7 +397,8 @@ export function useWUQueryMetrics( scopeFilter: Partial = scopeFilterDefault, nestedFilter: WsWorkunits.NestedFilter = nestedFilterDefault ): useMetricsResult { - const wuMetrics = useWorkunitMetrics(wuid, scopeFilter, nestedFilter); - const queryMetrics = useQueryMetrics(querySet, queryId, scopeFilter, nestedFilter); - return querySet && queryId ? { ...queryMetrics } : { ...wuMetrics }; + const isQuery = querySet && queryId; + const wuMetrics = useWorkunitMetrics(isQuery ? "" : wuid, scopeFilter, nestedFilter); + const queryMetrics = useQueryMetrics(isQuery ? querySet : "", isQuery ? queryId : "", scopeFilter, nestedFilter); + return isQuery ? 
{ ...queryMetrics } : { ...wuMetrics }; } diff --git a/esp/src/src-react/hooks/workunit.ts b/esp/src/src-react/hooks/workunit.ts index 3bc1c22314a..089b25d883f 100644 --- a/esp/src/src-react/hooks/workunit.ts +++ b/esp/src/src-react/hooks/workunit.ts @@ -16,7 +16,7 @@ export function useWorkunit(wuid: string, full: boolean = false): [Workunit, WUS const [retVal, setRetVal] = React.useState<{ workunit: Workunit, state: number, lastUpdate: number, isComplete: boolean, refresh: RefreshFunc }>(); React.useEffect(() => { - if (wuid === undefined || wuid === null) { + if (!wuid) { setRetVal({ workunit: undefined, state: WUStateID.NotFound, lastUpdate: Date.now(), isComplete: undefined, refresh: (full?: boolean) => Promise.resolve(undefined) }); return; }